From 5a363ab97d2105ca122845ce530be634a523b1a4 Mon Sep 17 00:00:00 2001
From: Ivan Mikushin
Date: Thu, 26 Nov 2015 17:41:42 +0500
Subject: [PATCH] migrate to upstream libcompose in one and a half go

---
 .dockerignore              |  1 +
 .dockerignore.docker       |  1 +
 Dockerfile.build-base      |  5 +-
 Makefile.docker            |  3 +-
 build.conf                 |  2 +-
 cmd/cloudinit/cloudinit.go |  2 +-
 cmd/cloudinit/gce.go       |  2 +-
 cmd/control/config.go      |  2 +-
 cmd/control/os.go          |  2 +-
 cmd/network/network.go     |  2 +-
 cmd/userdocker/main.go     |  2 +-
 compose/project.go         |  8 +-
 config/config.go           |  2 +-
 config/config_test.go      | 44 +-
 config/disk.go             | 25 +-
 config/types.go            |  1 +
 docker/client_factory.go   | 12 +-
 docker/service.go          | 94 +-
 glide.yaml                 | 42 +-
 os-config.yml              | 32 +-
 util/util.go               |  2 +-
 [vendored-tree diffstat condensed: vendor/github.com/codegangsta/cli is updated
  (cli_test.go dropped, help_test.go added); vendor/github.com/docker/distribution
  is updated, dropping its own Godeps/_workspace vendored sources (AdRoll/goamz,
  Azure/azure-sdk-for-go, Sirupsen/logrus, bugsnag, codegangsta/cli,
  denverdino/aliyungo, docker/docker pkg/tarsum, docker/libtrust, garyburd/redigo,
  gorilla/context, gorilla/handlers, gorilla/mux, mitchellh/mapstructure,
  ncw/swift, noahdesu/go-ceph, stevvooe/resumable, yvasiyarov/go-metrics,
  yvasiyarov/gorelic, yvasiyarov/newrelic_platform_go, golang.org/x/crypto,
  golang.org/x/net, gopkg.in/check.v1, gopkg.in/yaml.v2) along with its cmd/,
  configuration/, contrib/, docs/, health/, notifications/, registry auth/handlers/
  proxy/storage-driver code and testutil/version packages, moving manifest/ to
  manifest/schema1/ and adding the reference package; vendor/github.com/docker/docker
  is updated across api, cliconfig, daemon, dockerversion, graph, image, opts,
  pkg, registry, runconfig, utils and volume, and gains an errors package]
 vendor/github.com/docker/libcompose/MAINTAINERS                | 64 +-
 vendor/github.com/docker/libcompose/docker/builder.go          | 15 +-
 vendor/github.com/docker/libcompose/docker/container.go        |  2 +
 vendor/github.com/docker/libcompose/generate.go                |  4 -
 vendor/github.com/docker/libcompose/project/types_yaml.go      | 18 -
 vendor/github.com/docker/libcompose/project/types_yaml_test.go | 26 +
 [vendored-tree diffstat condensed, continued: vendor/github.com/docker/libnetwork
  (controller, default gateway, drivers, endpoint, network, sandbox and store
  reworked; sandboxdata removed), vendor/github.com/fsouza/go-dockerclient,
  vendor/github.com/gorilla/context, vendor/github.com/gorilla/mux,
  vendor/github.com/opencontainers/runc (libcontainer, cgroups, configs and the
  exec/kill/pause/restore/spec/start commands), vendor/github.com/rancher/docker-from-scratch,
  vendor/github.com/stretchr/testify and vendor/github.com/vishvananda/netlink
  are all updated]
 1291 files changed, 40107 insertions(+), 123532 deletions(-)
 delete mode 100644 vendor/github.com/codegangsta/cli/cli_test.go
 create mode 100644 vendor/github.com/codegangsta/cli/help_test.go
 delete mode 100644 vendor/github.com/docker/distribution/Godeps/Godeps.json
 delete
delete mode 100644 vendor/github.com/docker/distribution/Godeps/Readme
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/client.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/export_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/regions.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/testdata/key.pub
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/export_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/responses_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3i_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3t_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3test/server.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/LICENSE.txt
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/appengine.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/stackframe.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/event.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/json_tags.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/notifier.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/payload.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/revel/bugsnagrevel.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_plan9.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_procfs.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_sysctl.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_windows.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor_windows.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/help.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/hash.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/route.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/COPYING
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth_v3.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_0.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_1.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/example_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/notes.txt
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swifttest/server.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/conn.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/ioctx.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/resumable.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_386.s
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_amd64.s
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_decl.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256resume_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_amd64.s
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_decl.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512resume_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-bench/metrics-bench.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-example/metrics-example.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/never-read/never-read.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/healthcheck.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/influxdb/influxdb.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/client.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/librato.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/log.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/memory.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_cgo.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/stathat/stathat.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/syslog.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/agent.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example1.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example_web.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gc_metrics.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gometrica.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/http_metrics.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/memory_metrics.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/nut.json
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/runtime_metrics.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/.travis.yml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/agent.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/component.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/doc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/examples/wave_plugin.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/metrica.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/nut.json
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/plugin.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/base64.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/TODO
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
delete mode 100644 vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
delete mode 100644 vendor/github.com/docker/distribution/cmd/dist/list.go
delete mode 100644 vendor/github.com/docker/distribution/cmd/dist/main.go
delete mode 100644 vendor/github.com/docker/distribution/cmd/dist/pull.go
delete mode 100644 vendor/github.com/docker/distribution/cmd/dist/push.go
delete mode 100644 vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go
delete mode 100644 vendor/github.com/docker/distribution/cmd/registry/config-cache.yml
delete mode 100644 vendor/github.com/docker/distribution/cmd/registry/config-dev.yml
delete mode 100644 vendor/github.com/docker/distribution/cmd/registry/config-example.yml
delete mode 100644 vendor/github.com/docker/distribution/cmd/registry/rados.go
delete mode 100644 vendor/github.com/docker/distribution/configuration/configuration.go
delete mode 100644 vendor/github.com/docker/distribution/configuration/configuration_test.go
delete mode 100644 vendor/github.com/docker/distribution/configuration/parser.go
create mode 100644 vendor/github.com/docker/distribution/context/version.go
create mode 100644 vendor/github.com/docker/distribution/context/version_test.go
delete mode 100644 vendor/github.com/docker/distribution/contrib/apache/README.MD
delete mode 100644 vendor/github.com/docker/distribution/contrib/apache/apache.conf
delete mode 100755 vendor/github.com/docker/distribution/contrib/ceph/ci-setup.sh
delete mode 100644 vendor/github.com/docker/distribution/contrib/compose/README.md
delete mode 100644 vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml
delete mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile
delete mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/README.md
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/install_certs.sh
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd
delete mode 100755 vendor/github.com/docker/distribution/contrib/docker-integration/run.sh
delete mode 100755 vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh
delete mode 100755 vendor/github.com/docker/distribution/contrib/docker-integration/test_runner.sh
delete mode 100644 vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats
delete mode 100644 vendor/github.com/docker/distribution/docs/Dockerfile
delete mode 100644 vendor/github.com/docker/distribution/docs/Makefile
delete mode 100644 vendor/github.com/docker/distribution/docs/architecture.md
delete mode 100644 vendor/github.com/docker/distribution/docs/authentication.md
delete mode 100644 vendor/github.com/docker/distribution/docs/building.md
delete mode 100644 vendor/github.com/docker/distribution/docs/configuration.md
delete mode 100644 vendor/github.com/docker/distribution/docs/deploying.md
delete mode 100644 vendor/github.com/docker/distribution/docs/glossary.md
delete mode 100644 vendor/github.com/docker/distribution/docs/help.md
delete mode 100644 vendor/github.com/docker/distribution/docs/images/notifications.gliffy
delete mode 100644 vendor/github.com/docker/distribution/docs/images/notifications.png
delete mode 100644 vendor/github.com/docker/distribution/docs/images/notifications.svg
delete mode 100644 vendor/github.com/docker/distribution/docs/images/registry.gliffy
delete mode 100644 vendor/github.com/docker/distribution/docs/images/registry.png
delete mode 100644 vendor/github.com/docker/distribution/docs/images/registry.svg
delete mode 100644 vendor/github.com/docker/distribution/docs/index.md
delete mode 100644 vendor/github.com/docker/distribution/docs/introduction.md
delete mode 100644 vendor/github.com/docker/distribution/docs/migration.md
delete mode 100644 vendor/github.com/docker/distribution/docs/mirror.md
delete mode 100644 vendor/github.com/docker/distribution/docs/mkdocs.yml
delete mode 100644 vendor/github.com/docker/distribution/docs/notifications.md
delete mode 100644 vendor/github.com/docker/distribution/docs/osx-setup-guide.md
delete mode 100644 vendor/github.com/docker/distribution/docs/osx/com.docker.registry.plist
delete mode 100644 vendor/github.com/docker/distribution/docs/osx/config.yml
delete mode 100644 vendor/github.com/docker/distribution/docs/spec/api.md
delete mode 100644 vendor/github.com/docker/distribution/docs/spec/api.md.tmpl
delete mode 100644 vendor/github.com/docker/distribution/docs/spec/auth/token.md
delete mode 100644 vendor/github.com/docker/distribution/docs/spec/implementations.md
delete mode 100644 vendor/github.com/docker/distribution/docs/spec/json.md
delete mode 100644 vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md
delete mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/azure.md
delete mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md
delete mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md
delete mode 100755 vendor/github.com/docker/distribution/docs/storage-drivers/oss.md
delete mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/rados.md
delete mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/s3.md
delete mode 100644 vendor/github.com/docker/distribution/docs/storage-drivers/swift.md
delete mode 100644 vendor/github.com/docker/distribution/docs/storagedrivers.md
delete mode 100644 vendor/github.com/docker/distribution/health/api/api.go
delete mode 100644 vendor/github.com/docker/distribution/health/api/api_test.go
delete mode 100644 vendor/github.com/docker/distribution/health/checks/checks.go
delete mode 100644 vendor/github.com/docker/distribution/health/checks/checks_test.go
delete mode 100644 vendor/github.com/docker/distribution/health/doc.go
delete mode 100644 vendor/github.com/docker/distribution/health/health.go
delete mode 100644 vendor/github.com/docker/distribution/health/health_test.go
create mode 100644 vendor/github.com/docker/distribution/manifest/doc.go
rename vendor/github.com/docker/distribution/manifest/{ => schema1}/manifest.go (89%)
rename vendor/github.com/docker/distribution/manifest/{ => schema1}/manifest_test.go (95%)
rename vendor/github.com/docker/distribution/manifest/{ => schema1}/sign.go (98%)
rename vendor/github.com/docker/distribution/manifest/{ => schema1}/verify.go (98%)
create mode 100644 vendor/github.com/docker/distribution/manifest/versioned.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/bridge.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/bridge_test.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/endpoint.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/event.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/event_test.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/http.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/http_test.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/listener.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/listener_test.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/metrics.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/sinks.go
delete mode 100644 vendor/github.com/docker/distribution/notifications/sinks_test.go
delete mode 100644 vendor/github.com/docker/distribution/project/dev-image/Dockerfile
delete mode 100644 vendor/github.com/docker/distribution/project/hooks/README.md
delete mode 100755 vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh
delete mode 100755 vendor/github.com/docker/distribution/project/hooks/pre-commit
create mode 100644 vendor/github.com/docker/distribution/reference/reference.go
create mode 100644 vendor/github.com/docker/distribution/reference/reference_test.go
create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go
create mode 100644 vendor/github.com/docker/distribution/reference/regexp_test.go
delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/names.go
delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/names_test.go
delete mode 100644 vendor/github.com/docker/distribution/registry/auth/auth.go
delete mode 100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go
delete mode 100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go
delete mode
100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/silly/access.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/silly/access_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/token/stringset.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/token/token.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/token/token_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/auth/token/util.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/api_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/app.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/app_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/basicauth.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/blob.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/blobupload.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/catalog.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/context.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/helpers.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/hmac.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/hmac_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/hooks.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/images.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/mail.go delete mode 100644 vendor/github.com/docker/distribution/registry/handlers/tags.go delete mode 100644 vendor/github.com/docker/distribution/registry/listener/listener.go delete mode 100644 vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go delete mode 100644 vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyauth.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go delete mode 100644 vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go rename vendor/github.com/docker/distribution/{cmd/registry/main.go => registry/registry.go} (64%) rename vendor/github.com/docker/distribution/registry/storage/cache/{ => cachecheck}/suite.go (93%) delete mode 100644 
vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/base/base.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/rados/doc.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/rados/rados.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/s3/s3.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go delete mode 100644 vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go delete mode 100644 vendor/github.com/docker/distribution/testutil/handler.go delete mode 100644 vendor/github.com/docker/distribution/testutil/tarfile.go delete mode 100644 vendor/github.com/docker/distribution/version/print.go delete mode 100644 vendor/github.com/docker/distribution/version/version.go delete mode 100755 
vendor/github.com/docker/distribution/version/version.sh create mode 100644 vendor/github.com/docker/docker/Dockerfile.gccgo delete mode 100644 vendor/github.com/docker/docker/api/api_unit_test.go create mode 100644 vendor/github.com/docker/docker/api/client/ps/formatter_test.go delete mode 100644 vendor/github.com/docker/docker/api/client/service.go create mode 100644 vendor/github.com/docker/docker/api/client/trust_test.go create mode 100644 vendor/github.com/docker/docker/api/client/volume.go create mode 100644 vendor/github.com/docker/docker/api/common_test.go create mode 100644 vendor/github.com/docker/docker/api/fixtures/keyfile delete mode 100644 vendor/github.com/docker/docker/api/server/form.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/form.go rename vendor/github.com/docker/docker/api/server/{ => httputils}/form_test.go (55%) create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware_test.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/auth.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/container.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/copy.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/exec.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/image.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/info.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/inspect.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local/local.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/network.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/network_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/router.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/server_experimental.go delete mode 100644 vendor/github.com/docker/docker/api/server/server_stub.go create mode 100644 vendor/github.com/docker/docker/api/server/server_test.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md create mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p19/types.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p20/types.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_experimental.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux_test.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_stub.go rename vendor/github.com/docker/docker/{api/server/server_linux_test.go => daemon/daemon_unix_test.go} (59%) create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/delete_test.go create mode 100644 vendor/github.com/docker/docker/daemon/discovery.go create mode 
100644 vendor/github.com/docker/docker/daemon/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/events.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec_freebsd.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/list_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/list_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/mounts.go create mode 100644 vendor/github.com/docker/docker/daemon/network.go create mode 100644 vendor/github.com/docker/docker/daemon/state_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/state_windows.go rename vendor/github.com/docker/docker/daemon/{top.go => top_unix.go} (66%) create mode 100644 vendor/github.com/docker/docker/daemon/top_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/utils_freebsd.go delete mode 100644 vendor/github.com/docker/docker/daemon/utils_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/volumes_linux_unit_test.go create mode 100644 vendor/github.com/docker/docker/dockerversion/version_lib.go create mode 100644 vendor/github.com/docker/docker/errors/README.md create mode 100644 vendor/github.com/docker/docker/errors/builder.go create mode 100644 vendor/github.com/docker/docker/errors/daemon.go create mode 100644 vendor/github.com/docker/docker/errors/error.go create mode 100644 vendor/github.com/docker/docker/errors/image.go create mode 100644 vendor/github.com/docker/docker/errors/server.go delete mode 100644 vendor/github.com/docker/docker/graph/graph_unix.go delete mode 100644 vendor/github.com/docker/docker/graph/graph_windows.go delete mode 100644 vendor/github.com/docker/docker/graph/mutex.go create mode 100644 vendor/github.com/docker/docker/graph/pull_v2_test.go create mode 100644 vendor/github.com/docker/docker/image/fixtures/post1.9/expected_computed_id create mode 100644 vendor/github.com/docker/docker/image/fixtures/post1.9/expected_config create mode 100644 vendor/github.com/docker/docker/image/fixtures/post1.9/layer_id create mode 100644 vendor/github.com/docker/docker/image/fixtures/post1.9/parent_id create mode 100644 vendor/github.com/docker/docker/image/fixtures/post1.9/v1compatibility create mode 100644 vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_computed_id create mode 100644 vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_config create mode 100644 vendor/github.com/docker/docker/image/fixtures/pre1.9/layer_id create mode 100644 vendor/github.com/docker/docker/image/fixtures/pre1.9/parent_id create mode 100644 vendor/github.com/docker/docker/image/fixtures/pre1.9/v1compatibility create mode 100644 vendor/github.com/docker/docker/image/image_test.go create mode 100644 vendor/github.com/docker/docker/opts/opts_unix.go create mode 100644 vendor/github.com/docker/docker/opts/opts_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils_test.go create mode 100644 
vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go rename vendor/github.com/docker/docker/pkg/parsers/kernel/{kernel_test.go => kernel_unix_test.go} (95%) rename vendor/github.com/docker/docker/pkg/parsers/operatingsystem/{operatingsystem_test.go => operatingsystem_unix_test.go} (54%) create mode 100644 vendor/github.com/docker/docker/pkg/signal/README.md create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/trap.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/README.md create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/strslice.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/strslice_test.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go rename vendor/github.com/docker/docker/pkg/system/{lstat_test.go => lstat_unix_test.go} (95%) rename vendor/github.com/docker/docker/pkg/system/{meminfo_linux_test.go => meminfo_unix_test.go} (97%) rename vendor/github.com/docker/docker/pkg/system/{stat_test.go => stat_unix_test.go} (88%) create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go rename vendor/github.com/docker/docker/pkg/system/{utimes_test.go => utimes_unix_test.go} (98%) create mode 100644 vendor/github.com/docker/docker/registry/config_unix.go create mode 100644 vendor/github.com/docker/docker/registry/config_windows.go create mode 100644 vendor/github.com/docker/docker/registry/service_v1.go create mode 100644 vendor/github.com/docker/docker/registry/service_v2.go create mode 100644 
vendor/github.com/docker/docker/runconfig/config_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_windows.go rename vendor/github.com/docker/docker/runconfig/fixtures/{ => unix}/container_config_1_14.json (100%) rename vendor/github.com/docker/docker/runconfig/fixtures/{ => unix}/container_config_1_17.json (97%) rename vendor/github.com/docker/docker/runconfig/fixtures/{ => unix}/container_config_1_19.json (98%) rename vendor/github.com/docker/docker/runconfig/fixtures/{ => unix}/container_hostconfig_1_14.json (100%) rename vendor/github.com/docker/docker/runconfig/fixtures/{ => unix}/container_hostconfig_1_19.json (100%) create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json delete mode 100644 vendor/github.com/docker/docker/runconfig/parse_experimental.go delete mode 100644 vendor/github.com/docker/docker/runconfig/parse_stub.go delete mode 100644 vendor/github.com/docker/docker/runconfig/parse_unix.go delete mode 100644 vendor/github.com/docker/docker/runconfig/parse_windows.go create mode 100644 vendor/github.com/docker/docker/utils/names.go create mode 100644 vendor/github.com/docker/docker/utils/timeout.go create mode 100644 vendor/github.com/docker/docker/utils/utils_unix.go create mode 100644 vendor/github.com/docker/docker/utils/utils_windows.go delete mode 100644 vendor/github.com/docker/docker/volume/drivers/api.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/extpoint_test.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_test.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_unix.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_windows.go create mode 100644 vendor/github.com/docker/docker/volume/store/store.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_test.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_unix.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_windows.go create mode 100644 vendor/github.com/docker/docker/volume/testutils/testutils.go create mode 100644 vendor/github.com/docker/docker/volume/volume_test.go create mode 100644 vendor/github.com/docker/docker/volume/volume_unix.go create mode 100644 vendor/github.com/docker/docker/volume/volume_windows.go delete mode 100644 vendor/github.com/docker/libcompose/generate.go create mode 100644 vendor/github.com/docker/libnetwork/Vagrantfile create mode 100644 vendor/github.com/docker/libnetwork/default_gateway.go create mode 100644 vendor/github.com/docker/libnetwork/default_gateway_freebsd.go create mode 100644 vendor/github.com/docker/libnetwork/default_gateway_linux.go create mode 100644 vendor/github.com/docker/libnetwork/default_gateway_windows.go create mode 100644 vendor/github.com/docker/libnetwork/drivers.go create mode 100644 vendor/github.com/docker/libnetwork/endpoint_cnt.go create mode 100644 vendor/github.com/docker/libnetwork/sandbox.go create mode 100644 vendor/github.com/docker/libnetwork/sandbox_externalkey.go create mode 100644 vendor/github.com/docker/libnetwork/sandbox_store.go create mode 100644 vendor/github.com/docker/libnetwork/sandbox_test.go delete mode 100644 vendor/github.com/docker/libnetwork/sandboxdata.go delete mode 100644 vendor/github.com/docker/libnetwork/sandboxdata_test.go create mode 100644 vendor/github.com/docker/libnetwork/store_test.go create mode 100755 vendor/github.com/docker/libnetwork/wrapmake.sh create mode 100644 
vendor/github.com/opencontainers/runc/exec.go create mode 100644 vendor/github.com/opencontainers/runc/kill.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go create mode 100644 vendor/github.com/opencontainers/runc/pause.go create mode 100644 vendor/github.com/opencontainers/runc/rlimit_linux.go delete mode 100644 vendor/github.com/opencontainers/runc/run.go create mode 100644 vendor/github.com/opencontainers/runc/start.go create mode 100644 vendor/github.com/vishvananda/netlink/class.go create mode 100644 vendor/github.com/vishvananda/netlink/class_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/class_test.go create mode 100644 vendor/github.com/vishvananda/netlink/filter.go create mode 100644 vendor/github.com/vishvananda/netlink/filter_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/filter_test.go create mode 100644 vendor/github.com/vishvananda/netlink/link_tuntap_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/tc_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/tc_linux_test.go create mode 100644 vendor/github.com/vishvananda/netlink/qdisc.go create mode 100644 vendor/github.com/vishvananda/netlink/qdisc_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/qdisc_test.go diff --git a/.dockerignore b/.dockerignore index 4d50ab09..b6091a10 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,7 @@ .DS_Store .git .idea +.vendor .dockerignore bin gopath diff --git a/.dockerignore.docker b/.dockerignore.docker index 0b3eb86d..c8b6127d 100644 --- a/.dockerignore.docker +++ b/.dockerignore.docker @@ -1,6 +1,7 @@ .DS_Store .git .idea +.vendor .dockerignore bin gopath diff --git a/Dockerfile.build-base b/Dockerfile.build-base index 2754fe34..e8526d0f 100644 --- a/Dockerfile.build-base +++ b/Dockerfile.build-base @@ -13,13 +13,12 @@ RUN chmod +x /usr/bin/docker RUN pip install tox -RUN curl -sSL https://storage.googleapis.com/golang/go1.4.3.linux-amd64.tar.gz | tar -xz -C /usr/local +RUN curl -sSL https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz | tar -xz -C /usr/local ENV PATH $PATH:/usr/local/go/bin RUN mkdir -p /go/src /go/bin && chmod -R 777 /go ENV GOPATH /go ENV PATH /go/bin:$PATH - -RUN go get github.com/tools/godep +ENV GO15VENDOREXPERIMENT 1 WORKDIR /go/src/github.com/rancher/os diff --git a/Makefile.docker b/Makefile.docker index 47533682..932e35cc 100644 --- a/Makefile.docker +++ b/Makefile.docker @@ -1,12 +1,11 @@ include build.conf FORCE_PULL := 0 DEV_BUILD := 0 -GODEP := godep bin/rancheros: mkdir -p $(dir $@) - $(GODEP) go build -tags netgo -installsuffix netgo -ldflags "-X github.com/rancher/os/config.VERSION $(VERSION) -linkmode external -extldflags -static" -o $@ + go build -tags netgo -installsuffix netgo -ldflags "-X github.com/rancher/os/config.VERSION $(VERSION) -linkmode external -extldflags -static" -o $@ strip --strip-all $@ diff --git a/build.conf b/build.conf index cb666f52..c5a02a96 100644 --- a/build.conf +++ b/build.conf @@ -3,4 +3,4 @@ VERSION=v0.4.2-dev DOCKER_BINARY_URL=https://github.com/rancher/docker/releases/download/v1.9.1-ros1/docker-1.9.1 
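Note: a run of the hunks below replaces `gopkg.in/yaml.v2` with `github.com/cloudfoundry-incubator/candiedyaml`, imported under the familiar `yaml` alias. A small self-contained sketch of the API as the patched call sites use it, including the merge-by-repeated-Unmarshal semantics that the rewritten `TestMapMerge` (further down) pins down — the struct types here stand in for the test's `OuterData`, and candiedyaml's `Unmarshal` signature is assumed to mirror yaml.v2's, which is how every call site in this patch uses it:

```go
package main

import (
	"fmt"

	yaml "github.com/cloudfoundry-incubator/candiedyaml"
)

type Inner struct {
	Two   bool `yaml:"two"`
	Three bool `yaml:"three"`
}

type Outer struct {
	One Inner `yaml:"one"`
}

func must(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	one := []byte("one:\n  two: true")
	two := []byte("one:\n  three: true")

	// Unmarshal wants a pointer to the destination -- note the &data;
	// the old TestMapMerge passed the map by value.
	data := map[string]map[string]bool{}
	must(yaml.Unmarshal(one, &data))
	must(yaml.Unmarshal(two, &data))

	// Unmarshalling twice into a map replaces the nested map wholesale,
	// so "two" is dropped -- exactly what the updated test asserts.
	fmt.Println(data["one"]["two"], data["one"]["three"]) // false true

	// Into a struct, fields absent from the second document keep their
	// earlier values, so repeated documents effectively merge.
	var out Outer
	must(yaml.Unmarshal(one, &out))
	must(yaml.Unmarshal(two, &out))
	fmt.Println(out.One.Two, out.One.Three) // true true
}
```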
COMPILED_KERNEL_URL=https://github.com/rancher/os-kernel/releases/download/Ubuntu-4.2.0-16.19/linux-4.2.3-rancher-x86.tar.gz -DFS_IMAGE=rancher/docker:1.9.1 +DFS_IMAGE=rancher/docker:1.9.1-1 diff --git a/cmd/cloudinit/cloudinit.go b/cmd/cloudinit/cloudinit.go index 9f0c0953..89eb9406 100644 --- a/cmd/cloudinit/cloudinit.go +++ b/cmd/cloudinit/cloudinit.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "gopkg.in/yaml.v2" + yaml "github.com/cloudfoundry-incubator/candiedyaml" log "github.com/Sirupsen/logrus" "github.com/coreos/coreos-cloudinit/config" diff --git a/cmd/cloudinit/gce.go b/cmd/cloudinit/gce.go index 9f28c8df..c42273e5 100644 --- a/cmd/cloudinit/gce.go +++ b/cmd/cloudinit/gce.go @@ -5,8 +5,8 @@ import ( "strings" log "github.com/Sirupsen/logrus" + yaml "github.com/cloudfoundry-incubator/candiedyaml" "google.golang.org/cloud/compute/metadata" - "gopkg.in/yaml.v2" ) type GceCloudConfig struct { diff --git a/cmd/control/config.go b/cmd/control/config.go index 0831248b..75bdc1d2 100644 --- a/cmd/control/config.go +++ b/cmd/control/config.go @@ -9,7 +9,7 @@ import ( "strings" log "github.com/Sirupsen/logrus" - "gopkg.in/yaml.v2" + yaml "github.com/cloudfoundry-incubator/candiedyaml" "github.com/codegangsta/cli" "github.com/rancher/os/config" diff --git a/cmd/control/os.go b/cmd/control/os.go index 84627e25..aff2ada2 100644 --- a/cmd/control/os.go +++ b/cmd/control/os.go @@ -10,7 +10,7 @@ import ( "strings" log "github.com/Sirupsen/logrus" - "gopkg.in/yaml.v2" + yaml "github.com/cloudfoundry-incubator/candiedyaml" dockerClient "github.com/fsouza/go-dockerclient" diff --git a/cmd/network/network.go b/cmd/network/network.go index 2c95a879..51f8cba3 100644 --- a/cmd/network/network.go +++ b/cmd/network/network.go @@ -42,7 +42,7 @@ func Main() { } if cfg.Rancher.Network.Dns.Override { log.WithFields(log.Fields{"nameservers": cfg.Rancher.Network.Dns.Nameservers}).Info("Override nameservers") - if err := resolvconf.Build("/etc/resolv.conf", cfg.Rancher.Network.Dns.Nameservers, cfg.Rancher.Network.Dns.Search); err != nil { + if _, err := resolvconf.Build("/etc/resolv.conf", cfg.Rancher.Network.Dns.Nameservers, cfg.Rancher.Network.Dns.Search, nil); err != nil { log.Error(err) } } diff --git a/cmd/userdocker/main.go b/cmd/userdocker/main.go index aae982e1..0fdcfff0 100644 --- a/cmd/userdocker/main.go +++ b/cmd/userdocker/main.go @@ -182,7 +182,7 @@ func getPid(service string, project *project.Project) (int, error) { return 0, err } - id, err := containers[0].Id() + id, err := containers[0].ID() if err != nil { return 0, err } diff --git a/compose/project.go b/compose/project.go index 0828aa27..3bc96fb1 100644 --- a/compose/project.go +++ b/compose/project.go @@ -57,9 +57,9 @@ func newProject(name string, cfg *config.CloudConfig) (*project.Project, error) ClientFactory: clientFactory, Context: project.Context{ ProjectName: name, + NoRecreate: true, // keep libcompose from recreating services on project reload, which would loop the boot EnvironmentLookup: rosDocker.NewConfigEnvironment(cfg), ServiceFactory: serviceFactory, - Rebuild: true, Log: cfg.Rancher.Log, LoggerFactory: logger.NewColorLoggerFactory(), }, @@ -73,7 +73,7 @@ func addServices(p *project.Project, enabled map[interface{}]interface{}, config // Note: we ignore errors while loading services unchanged := true for name, serviceConfig := range configs { - hash := project.GetServiceHash(name, *serviceConfig) + hash := project.GetServiceHash(name, serviceConfig) if enabled[name] == hash { continue @@ -94,7 +94,7 @@ func addServices(p *project.Project,
enabled map[interface{}]interface{}, config } func newCoreServiceProject(cfg *config.CloudConfig, network bool) (*project.Project, error) { - projectEvents := make(chan project.ProjectEvent) + projectEvents := make(chan project.Event) enabled := map[interface{}]interface{}{} p, err := newProject("os", cfg) @@ -143,7 +143,7 @@ func newCoreServiceProject(cfg *config.CloudConfig, network bool) (*project.Proj go func() { for event := range projectEvents { - if event.Event == project.CONTAINER_STARTED && event.ServiceName == "network" { + if event.EventType == project.EventContainerStarted && event.ServiceName == "network" { network = true } } diff --git a/config/config.go b/config/config.go index 15d3e1ab..11efa5dd 100644 --- a/config/config.go +++ b/config/config.go @@ -2,8 +2,8 @@ package config import ( log "github.com/Sirupsen/logrus" + yaml "github.com/cloudfoundry-incubator/candiedyaml" "github.com/rancher/os/util" - "gopkg.in/yaml.v2" ) func (c *CloudConfig) Import(bytes []byte) (*CloudConfig, error) { diff --git a/config/config_test.go b/config/config_test.go index 4b0f94d8..2a1b726e 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2,7 +2,7 @@ package config import ( "fmt" - "gopkg.in/yaml.v2" + yaml "github.com/cloudfoundry-incubator/candiedyaml" "testing" "github.com/rancher/os/util" @@ -215,6 +215,7 @@ type Data struct { } func TestMapMerge(t *testing.T) { + assert := require.New(t) one := ` one: two: true` @@ -222,34 +223,20 @@ one: one: three: true` - data := make(map[string]map[string]bool) - yaml.Unmarshal([]byte(one), data) - yaml.Unmarshal([]byte(two), data) + data := map[string]map[string]bool{} + yaml.Unmarshal([]byte(one), &data) + yaml.Unmarshal([]byte(two), &data) - if _, ok := data["one"]; !ok { - t.Fatal("one not found") - } - - if !data["one"]["three"] { - t.Fatal("three not found") - } - - if data["one"]["two"] { - t.Fatal("two not found") - } + assert.NotNil(data["one"]) + assert.True(data["one"]["three"]) + assert.False(data["one"]["two"]) data2 := &OuterData{} yaml.Unmarshal([]byte(one), data2) yaml.Unmarshal([]byte(two), data2) - if !data2.One.Three { - t.Fatal("three not found") - } - - if !data2.One.Two { - t.Fatal("two not found") - } - + assert.True(data2.One.Three) + assert.True(data2.One.Two) } func TestUserDocker(t *testing.T) { @@ -266,16 +253,19 @@ func TestUserDocker(t *testing.T) { bytes, err := yaml.Marshal(config) assert.Nil(err) - config = NewConfig() + config = &CloudConfig{} + assert.False(config.Rancher.Docker.TLS) err = yaml.Unmarshal(bytes, config) assert.Nil(err) + assert.True(config.Rancher.Docker.TLS) - data := map[interface{}]map[interface{}]interface{}{} - util.Convert(config, &data) + data := map[interface{}]interface{}{} + err = util.Convert(config, &data) + assert.Nil(err) fmt.Println(data) - val, ok := data["rancher"]["docker"] + val, ok := data["rancher"].(map[interface{}]interface{})["docker"] assert.True(ok) m, ok := val.(map[interface{}]interface{}) diff --git a/config/disk.go b/config/disk.go index dad890a7..e0fb053e 100644 --- a/config/disk.go +++ b/config/disk.go @@ -7,11 +7,11 @@ import ( "strings" log "github.com/Sirupsen/logrus" + yaml "github.com/cloudfoundry-incubator/candiedyaml" "github.com/coreos/coreos-cloudinit/datasource" "github.com/coreos/coreos-cloudinit/initialize" "github.com/docker/libcompose/project" "github.com/rancher/os/util" - "gopkg.in/yaml.v2" ) var osConfig *CloudConfig @@ -32,6 +32,7 @@ func ReadConfig(bytes []byte, substituteMetadataVars bool, files ...string) (*Cl return nil, err } c, 
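Note: the compose/project.go changes above move from the old `ProjectEvent`/`CONTAINER_STARTED` pair to upstream libcompose's `project.Event` type and its typed `project.EventContainerStarted` constant. A compilable sketch of the consumer side (the function name and flag pointer are illustrative, not part of the patch):

```go
package compose

import "github.com/docker/libcompose/project"

// watchForNetwork drains a libcompose event channel and reports when
// the "network" service has started. Event values now expose the kind
// of event as EventType, matched against typed constants such as
// project.EventContainerStarted.
func watchForNetwork(events <-chan project.Event, networkUp *bool) {
	for event := range events {
		if event.EventType == project.EventContainerStarted && event.ServiceName == "network" {
			*networkUp = true
		}
	}
}
```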
_ = amendNils(c) + c, _ = amendContainerNames(c) return c, nil } else { return nil, err @@ -42,7 +43,8 @@ func LoadConfig() (*CloudConfig, error) { cfg, err := ChainCfgFuncs(NewConfig(), readFilesAndMetadata, readCmdline, - amendNils) + amendNils, + amendContainerNames) if err != nil { log.WithFields(log.Fields{"cfg": cfg, "err": err}).Error("Failed to load config") return nil, err @@ -130,7 +132,7 @@ func readFilesAndMetadata(c *CloudConfig) (*CloudConfig, error) { files := append(CloudConfigDirFiles(), CloudConfigFile) data, err := readConfig(nil, true, files...) if err != nil { - log.WithFields(log.Fields{"err": err}).Error("Error reading config files") + log.WithFields(log.Fields{"err": err, "files": files}).Error("Error reading config files") return c, err } @@ -186,6 +188,20 @@ func amendNils(c *CloudConfig) (*CloudConfig, error) { return &t, nil } +func amendContainerNames(c *CloudConfig) (*CloudConfig, error) { + for _, scm := range []map[string]*project.ServiceConfig{ + c.Rancher.Autoformat, + c.Rancher.BootstrapContainers, + c.Rancher.Services, + } { + for k, v := range scm { + v.Name = k + v.ContainerName = k + } + } + return c, nil +} + func writeToFile(data interface{}, filename string) error { content, err := yaml.Marshal(data) if err != nil { @@ -222,6 +238,9 @@ func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map if err != nil { return nil, err } + if len(content) == 0 { + continue + } if substituteMetadataVars { content = substituteVars(content, metadata) } diff --git a/config/types.go b/config/types.go index d44ecaaf..52c1e49c 100644 --- a/config/types.go +++ b/config/types.go @@ -27,6 +27,7 @@ const ( CREATE_ONLY = "io.rancher.os.createonly" RELOAD_CONFIG = "io.rancher.os.reloadconfig" SCOPE = "io.rancher.os.scope" + REBUILD = "io.docker.compose.rebuild" SYSTEM = "system" OsConfigFile = "/usr/share/ros/os-config.yml" diff --git a/docker/client_factory.go b/docker/client_factory.go index 4be5292e..b0f479b1 100644 --- a/docker/client_factory.go +++ b/docker/client_factory.go @@ -4,17 +4,17 @@ import ( "fmt" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/libcompose/docker" "github.com/docker/libcompose/project" - "github.com/docker/machine/log" + dockerclient "github.com/fsouza/go-dockerclient" "github.com/rancher/os/config" "github.com/rancher/os/util" - "github.com/samalba/dockerclient" ) type ClientFactory struct { - userClient dockerclient.Client - systemClient dockerclient.Client + userClient *dockerclient.Client + systemClient *dockerclient.Client userOnce sync.Once systemOnce sync.Once } @@ -42,7 +42,7 @@ func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) { }, nil } -func (c *ClientFactory) Create(service project.Service) dockerclient.Client { +func (c *ClientFactory) Create(service project.Service) *dockerclient.Client { if IsSystemContainer(service.Config()) { waitFor(&c.systemOnce, c.systemClient, config.DOCKER_SYSTEM_HOST) return c.systemClient @@ -52,7 +52,7 @@ func (c *ClientFactory) Create(service project.Service) dockerclient.Client { return c.userClient } -func waitFor(once *sync.Once, client dockerclient.Client, endpoint string) { +func waitFor(once *sync.Once, client *dockerclient.Client, endpoint string) { once.Do(func() { err := ClientOK(endpoint, func() bool { _, err := client.Info() diff --git a/docker/service.go b/docker/service.go index ea9351d0..f85907f8 100644 --- a/docker/service.go +++ b/docker/service.go @@ -1,13 +1,11 @@ package docker import ( - "fmt" - + 
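Note: `amendContainerNames` above is the glue that keeps service naming stable across the migration: upstream libcompose derives the container name from the `ServiceConfig` itself rather than RancherOS renaming the container after creation (hence the removal of `rename()` in docker/service.go below). Distilled, with a hypothetical helper name:

```go
package config

import "github.com/docker/libcompose/project"

// stampNames writes each service's YAML map key back into its
// ServiceConfig, so libcompose creates the container under that name
// up front instead of it being renamed afterwards.
func stampNames(services map[string]*project.ServiceConfig) {
	for name, svc := range services {
		svc.Name = name
		svc.ContainerName = name
	}
}
```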
"github.com/Sirupsen/logrus" "github.com/docker/libcompose/docker" "github.com/docker/libcompose/project" - "github.com/docker/machine/log" + dockerclient "github.com/fsouza/go-dockerclient" "github.com/rancher/os/config" - "github.com/samalba/dockerclient" ) type Service struct { @@ -68,23 +66,71 @@ func appendLink(deps []project.ServiceRelationship, name string, optional bool, if _, ok := p.Configs[name]; !ok { return deps } - rel := project.NewServiceRelationship(name, project.REL_TYPE_LINK) + rel := project.NewServiceRelationship(name, project.RelTypeLink) rel.Optional = optional return append(deps, rel) } +func (s *Service) shouldRebuild() (bool, error) { + containers, err := s.Containers() + if err != nil { + return false, err + } + for _, c := range containers { + outOfSync, err := c.(*docker.Container).OutOfSync(s.Service.Config().Image) + if err != nil { + return false, err + } + + _, containerInfo, err := s.getContainer() + if containerInfo == nil || err != nil { + return false, err + } + name := containerInfo.Name[1:] + + origRebuildLabel := containerInfo.Config.Labels[config.REBUILD] + newRebuildLabel := s.Config().Labels.MapParts()[config.REBUILD] + rebuildLabelChanged := newRebuildLabel != origRebuildLabel + logrus.WithFields(logrus.Fields{ + "origRebuildLabel": origRebuildLabel, + "newRebuildLabel": newRebuildLabel, + "rebuildLabelChanged": rebuildLabelChanged, + "outOfSync": outOfSync}).Debug("Rebuild values") + + if origRebuildLabel == "always" || rebuildLabelChanged || origRebuildLabel != "false" && outOfSync { + logrus.Infof("Rebuilding %s", name) + return true, err + } else if outOfSync { + logrus.Warnf("%s needs rebuilding", name) + } + } + return false, nil +} + func (s *Service) Up() error { labels := s.Config().Labels.MapParts() if err := s.Service.Create(); err != nil { return err } - if err := s.rename(); err != nil { - return err - } if labels[config.CREATE_ONLY] == "true" { return s.checkReload(labels) } + shouldRebuild, err := s.shouldRebuild() + if err != nil { + return err + } + if shouldRebuild { + cs, err := s.Service.Containers() + if err != nil { + return err + } + for _, c := range cs { + if _, err := c.(*docker.Container).Recreate(s.Config().Image); err != nil { + return err + } + } + } if err := s.Service.Up(); err != nil { return err } @@ -105,13 +151,10 @@ func (s *Service) checkReload(labels map[string]string) error { } func (s *Service) Create() error { - if err := s.Service.Create(); err != nil { - return err - } - return s.rename() + return s.Service.Create() } -func (s *Service) getContainer() (dockerclient.Client, *dockerclient.ContainerInfo, error) { +func (s *Service) getContainer() (*dockerclient.Client, *dockerclient.Container, error) { containers, err := s.Service.Containers() if err != nil { return nil, nil, err @@ -121,7 +164,7 @@ func (s *Service) getContainer() (dockerclient.Client, *dockerclient.ContainerIn return nil, nil, nil } - id, err := containers[0].Id() + id, err := containers[0].ID() if err != nil { return nil, nil, err } @@ -137,28 +180,9 @@ func (s *Service) wait() error { return err } - status := <-client.Wait(info.Id) - if status.Error != nil { - return status.Error - } - - if status.ExitCode == 0 { - return nil - } else { - return fmt.Errorf("ExitCode %d", status.ExitCode) - } -} - -func (s *Service) rename() error { - client, info, err := s.getContainer() - if err != nil || info == nil { + if _, err := client.WaitContainer(info.ID); err != nil { return err } - if len(info.Name) > 0 && info.Name[1:] != s.Name() { - 
log.Debugf("Renaming container %s => %s", info.Name[1:], s.Name()) - return client.RenameContainer(info.Name[1:], s.Name()) - } else { - return nil - } + return nil } diff --git a/glide.yaml b/glide.yaml index 4936c670..8b5a4738 100644 --- a/glide.yaml +++ b/glide.yaml @@ -10,7 +10,7 @@ import: - . - package: github.com/codegangsta/cli - version: 8ce64f19ff08029a69d11b7615c9b591245450ad + version: 0302d3914d2a6ad61404584cdae6e6dbc9c03599 - package: github.com/coreos/coreos-cloudinit version: 405c2600b19ae77516c967f8ee8ebde5624d3663 @@ -32,15 +32,30 @@ import: version: 6b16a5714269b2f70720a45406b1babd947a17ef - package: github.com/docker/distribution - version: a0c63372fad430b7ab08d2763cb7d9e2c512c384 + version: c6c9194e9c6097f84b0ff468a741086ff7704aa3 + subpackages: + - context + - digest + - manifest/schema1 + - reference + - registry/api/errcode + - registry/api/v2 + - registry/client + - registry/client/auth + - registry/client/transport + - registry/storage/cache + - registry/storage/cache/memory + - uuid - package: github.com/docker/docker - version: f39987afe8d611407887b3094c03d6ba6a766a67 + version: 58b270c338e831ac6668a29788c72d202f9fc251 subpackages: - api - autogen/dockerversion - cliconfig - daemon/network + - dockerversion + - errors - graph/tags - image - opts @@ -48,6 +63,7 @@ import: - pkg/fileutils - pkg/homedir - pkg/httputils + - pkg/idtools - pkg/ioutils - pkg/jsonmessage - pkg/mflag @@ -58,8 +74,10 @@ import: - pkg/promise - pkg/random - pkg/reexec + - pkg/signal - pkg/stdcopy - pkg/stringid + - pkg/stringutils - pkg/symlink - pkg/system - pkg/tarsum @@ -77,7 +95,7 @@ import: - volume - package: github.com/docker/libcompose - version: 0919e089edff3ba95d84119228f46d414882ded1 + version: 3e678b6a30d314e86441cbd9ad01534b7534d53a subpackages: - cli - docker @@ -92,7 +110,7 @@ import: - netlink - package: github.com/docker/libnetwork - version: 0cc39f87276366ef6f22961ef2018d957d662724 + version: 5305ea570b85d61dd0fd261cd7e1680da1884678 subpackages: - resolvconf @@ -109,16 +127,16 @@ import: version: 3f9db97f856818214da2e1057f8ad84803971cff - package: github.com/fsouza/go-dockerclient - version: c3e8735e510cf8bc6d439300ff2fc247b1f2867c + version: b515e07d61c4e873407b54c4843b0f3ac9aa16f1 subpackages: - . - external - package: github.com/gorilla/context - version: 215affda49addc4c8ef7e2534915df2c8c35c6cd + version: 14f550f51af52180c2eefed15e5fd18d63c0a64a - package: github.com/gorilla/mux - version: f15e0c49460fd49eebe2bcc8486b05d1bef68d3a + version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf - package: github.com/guelfey/go.dbus version: f6a3a2366cc39b8479cadc499d3c735fb10fbdda @@ -134,7 +152,7 @@ import: version: d57d9d2d5be197e12d9dee142d855470d83ce62f - package: github.com/opencontainers/runc - version: b40c7901845dcec5950ecb37cb9de178fc2c0604 + version: 1349b37bd56f4f5ce2690b5b2c0f53f88a261c67 subpackages: - libcontainer/cgroups - libcontainer/configs @@ -143,7 +161,7 @@ import: - libcontainer/user - package: github.com/rancher/docker-from-scratch - version: 1.9.1 + version: 1.9.1-1 subpackages: - . 
- util @@ -155,13 +173,13 @@ import: version: 0067a9abd927e50aed5190662702f81231413ae0 - package: github.com/stretchr/testify - version: 089c7181b8c728499929ff09b62d3fdd8df8adff + version: a1f97990ddc16022ec7610326dd9bce31332c116 subpackages: - assert - require - package: github.com/vishvananda/netlink - version: ae3e7dba57271b4e976c4f91637861ee477135e2 + version: edcd99c0881a4de0fdb3818af6b24f4ee6948464 - package: golang.org/x/crypto version: 2f3083f6163ef51179ad42ed523a18c9a1141467 diff --git a/os-config.yml b/os-config.yml index 538616b9..7559083d 100644 --- a/os-config.yml +++ b/os-config.yml @@ -5,7 +5,7 @@ rancher: environment: - BOOTSTRAP=true labels: - io.rancher.os.detach: false + io.rancher.os.detach: "false" io.rancher.os.scope: system log_driver: json-file net: host @@ -19,7 +19,7 @@ rancher: autoformat: image: rancher/os-autoformat:v0.4.2-dev labels: - io.rancher.os.detach: false + io.rancher.os.detach: "false" io.rancher.os.scope: system log_driver: json-file net: none @@ -27,7 +27,7 @@ rancher: udev-autoformat: image: rancher/os-udev:v0.4.2-dev labels: - io.rancher.os.detach: false + io.rancher.os.detach: "false" io.rancher.os.scope: system io.rancher.os.after: autoformat log_driver: json-file @@ -72,7 +72,7 @@ rancher: all-volumes: image: rancher/os-state:v0.4.2-dev labels: - io.rancher.os.createonly: true + io.rancher.os.createonly: "true" io.rancher.os.scope: system log_driver: json-file net: none @@ -86,8 +86,8 @@ rancher: cloud-init: image: rancher/os-cloudinit:v0.4.2-dev labels: - io.rancher.os.detach: false - io.rancher.os.reloadconfig: true + io.rancher.os.detach: "false" + io.rancher.os.reloadconfig: "true" io.rancher.os.scope: system io.rancher.os.after: cloud-init-pre, wait-for-network net: host @@ -101,8 +101,8 @@ rancher: environment: - CLOUD_INIT_NETWORK=false labels: - io.rancher.os.detach: false - io.rancher.os.reloadconfig: true + io.rancher.os.detach: "false" + io.rancher.os.reloadconfig: "true" io.rancher.os.scope: system io.rancher.os.after: preload-system-images net: host @@ -114,7 +114,7 @@ rancher: command-volumes: image: rancher/os-state:v0.4.2-dev labels: - io.rancher.os.createonly: true + io.rancher.os.createonly: "true" io.rancher.os.scope: system log_driver: json-file net: none @@ -154,7 +154,7 @@ rancher: container-data-volumes: image: rancher/os-state:v0.4.2-dev labels: - io.rancher.os.createonly: true + io.rancher.os.createonly: "true" io.rancher.os.scope: system log_driver: json-file net: none @@ -179,7 +179,7 @@ rancher: image: rancher/os-network:v0.4.2-dev command: wait-for-network labels: - io.rancher.os.detach: false + io.rancher.os.detach: "false" io.rancher.os.scope: system io.rancher.os.after: network pid: host @@ -199,7 +199,7 @@ rancher: preload-system-images: image: rancher/os-preload:v0.4.2-dev labels: - io.rancher.os.detach: false + io.rancher.os.detach: "false" io.rancher.os.scope: system privileged: true volumes: @@ -211,7 +211,7 @@ rancher: preload-user-images: image: rancher/os-preload:v0.4.2-dev labels: - io.rancher.os.detach: false + io.rancher.os.detach: "false" io.rancher.os.scope: system io.rancher.os.after: console privileged: true @@ -235,7 +235,7 @@ rancher: system-volumes: image: rancher/os-state:v0.4.2-dev labels: - io.rancher.os.createonly: true + io.rancher.os.createonly: "true" io.rancher.os.scope: system log_driver: json-file net: none @@ -270,7 +270,7 @@ rancher: environment: - DAEMON=true labels: - io.rancher.os.detach: true + io.rancher.os.detach: "true" io.rancher.os.scope: system net: host uts: host @@ -281,7 
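Note: the os-config.yml hunks around this point quote every boolean-looking label value. The reason is YAML typing: unquoted `false`/`true` parse as booleans, while Docker label maps ultimately want strings. A quick demonstration with the same candiedyaml parser (output order may vary):

```go
package main

import (
	"fmt"

	yaml "github.com/cloudfoundry-incubator/candiedyaml"
)

func main() {
	src := []byte("labels:\n" +
		"  io.rancher.os.detach: false\n" + // unquoted: parsed as a bool
		"  io.rancher.os.scope: system\n") // plain scalar: a string

	var doc struct {
		Labels map[string]interface{} `yaml:"labels"`
	}
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	for k, v := range doc.Labels {
		fmt.Printf("%s = %v (%T)\n", k, v, v)
	}
	// io.rancher.os.detach = false (bool)
	// io.rancher.os.scope = system (string)
	// Quoting the value ("false") yields a string and satisfies label
	// maps typed as map[string]string.
}
```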
+281,7 @@ rancher: user-volumes: image: rancher/os-state:v0.4.2-dev labels: - io.rancher.os.createonly: true + io.rancher.os.createonly: "true" io.rancher.os.scope: system log_driver: json-file net: none diff --git a/util/util.go b/util/util.go index f55788a1..f903e470 100644 --- a/util/util.go +++ b/util/util.go @@ -9,7 +9,7 @@ import ( "os" "strings" - "gopkg.in/yaml.v2" + yaml "github.com/cloudfoundry-incubator/candiedyaml" log "github.com/Sirupsen/logrus" diff --git a/vendor/github.com/codegangsta/cli/.travis.yml b/vendor/github.com/codegangsta/cli/.travis.yml index baf46abc..87ba52f9 100644 --- a/vendor/github.com/codegangsta/cli/.travis.yml +++ b/vendor/github.com/codegangsta/cli/.travis.yml @@ -1,5 +1,18 @@ language: go -go: 1.1 +sudo: false + +go: +- 1.0.3 +- 1.1.2 +- 1.2.2 +- 1.3.3 +- 1.4.2 +- 1.5.1 +- tip + +matrix: + allow_failures: + - go: tip script: - go vet ./... diff --git a/vendor/github.com/codegangsta/cli/README.md b/vendor/github.com/codegangsta/cli/README.md index c0bb338a..26a18386 100644 --- a/vendor/github.com/codegangsta/cli/README.md +++ b/vendor/github.com/codegangsta/cli/README.md @@ -1,18 +1,17 @@ +[![Coverage](http://gocover.io/_badge/github.com/codegangsta/cli?0)](http://gocover.io/github.com/codegangsta/cli) [![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli) +[![GoDoc](https://godoc.org/github.com/codegangsta/cli?status.svg)](https://godoc.org/github.com/codegangsta/cli) # cli.go -cli.go is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. - -You can view the API docs here: -http://godoc.org/github.com/codegangsta/cli +`cli.go` is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. ## Overview Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app. -**This is where cli.go comes into play.** cli.go makes command line programming fun, organized, and expressive! +**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive! ## Installation -Make sure you have a working Go environment (go 1.1 is *required*). [See the install instructions](http://golang.org/doc/install.html). +Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html). To install `cli.go`, simply run: ``` @@ -25,7 +24,7 @@ export PATH=$PATH:$GOPATH/bin ``` ## Getting Started -One of the philosophies behind cli.go is that an API should be playful and full of discovery. So a cli.go app can be as little as one line of code in `main()`. +One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`. ``` go package main @@ -103,7 +102,8 @@ $ greet Hello friend! ``` -cli.go also generates some bitchass help text: +`cli.go` also generates neat help text: + ``` $ greet help NAME: @@ -158,6 +158,34 @@ app.Action = func(c *cli.Context) { ... ``` +You can also set a destination variable for a flag, to which the content will be scanned. +``` go +... 
+var language string +app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + Destination: &language, + }, +} +app.Action = func(c *cli.Context) { + name := "someone" + if len(c.Args()) > 0 { + name = c.Args()[0] + } + if language == "spanish" { + println("Hola", name) + } else { + println("Hello", name) + } +} +... +``` + +See full list of flags at http://godoc.org/github.com/codegangsta/cli + #### Alternate Names You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. @@ -210,7 +238,7 @@ Subcommands can be defined for a more git-like command line app. app.Commands = []cli.Command{ { Name: "add", - ShortName: "a", + Aliases: []string{"a"}, Usage: "add a task to the list", Action: func(c *cli.Context) { println("added task: ", c.Args().First()) @@ -218,7 +246,7 @@ app.Commands = []cli.Command{ }, { Name: "complete", - ShortName: "c", + Aliases: []string{"c"}, Usage: "complete a task on the list", Action: func(c *cli.Context) { println("completed task: ", c.Args().First()) @@ -226,7 +254,7 @@ app.Commands = []cli.Command{ }, { Name: "template", - ShortName: "r", + Aliases: []string{"r"}, Usage: "options for task templates", Subcommands: []cli.Command{ { @@ -244,7 +272,7 @@ app.Commands = []cli.Command{ }, }, }, - }, + }, } ... ``` @@ -262,8 +290,8 @@ app := cli.NewApp() app.EnableBashCompletion = true app.Commands = []cli.Command{ { - Name: "complete", - ShortName: "c", + Name: "complete", + Aliases: []string{"c"}, Usage: "complete a task on the list", Action: func(c *cli.Context) { println("completed task: ", c.Args().First()) @@ -289,6 +317,21 @@ setting the `PROG` variable to the name of your program: `PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` +#### To Distribute + +Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename +it to the name of the program you wish to add autocomplete support for (or +automatically install it there if you are distributing a package). Don't forget +to source the file to make it active in the current shell. + +``` + sudo cp src/bash_autocomplete /etc/bash_completion.d/ + source /etc/bash_completion.d/ +``` + +Alternatively, you can just document that users should source the generic +`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set +to the name of their program (as above). ## Contribution Guidelines Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch. diff --git a/vendor/github.com/codegangsta/cli/app.go b/vendor/github.com/codegangsta/cli/app.go index 3d4ed120..9a15c0c0 100644 --- a/vendor/github.com/codegangsta/cli/app.go +++ b/vendor/github.com/codegangsta/cli/app.go @@ -5,18 +5,20 @@ import ( "io" "io/ioutil" "os" - "text/tabwriter" - "text/template" "time" ) // App is the main structure of a cli application. It is recomended that -// and app be created with the cli.NewApp() function +// an app be created with the cli.NewApp() function type App struct { // The name of the program. Defaults to os.Args[0] Name string + // Full name of command for help, defaults to Name + HelpName string // Description of the program. Usage string + // Description of the program argument format. 
+ ArgsUsage string // Version of the program Version string // List of commands to execute @@ -45,6 +47,8 @@ type App struct { Compiled time.Time // List of all authors who contributed Authors []Author + // Copyright of the binary if any + Copyright string // Name of Author (Note: Use App.Authors, this is deprecated) Author string // Email of Author (Note: Use App.Authors, this is deprecated) @@ -67,6 +71,7 @@ func compileTime() time.Time { func NewApp() *App { return &App{ Name: os.Args[0], + HelpName: os.Args[0], Usage: "A new cli application", Version: "0.0.0", BashComplete: DefaultAppComplete, @@ -82,21 +87,14 @@ func (a *App) Run(arguments []string) (err error) { a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) } - if HelpPrinter == nil { - defer func() { - HelpPrinter = nil - }() - - HelpPrinter = func(templ string, data interface{}) { - w := tabwriter.NewWriter(a.Writer, 0, 8, 1, '\t', 0) - t := template.Must(template.New("help").Parse(templ)) - err := t.Execute(w, data) - if err != nil { - panic(err) - } - w.Flush() + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) } + newCmds = append(newCmds, c) } + a.Commands = newCmds // append help to commands if a.Command(helpCommand.Name) == nil && !a.HideHelp { @@ -122,17 +120,16 @@ func (a *App) Run(arguments []string) (err error) { nerr := normalizeFlags(a.Flags, set) if nerr != nil { fmt.Fprintln(a.Writer, nerr) - context := NewContext(a, set, set) + context := NewContext(a, set, nil) ShowAppHelp(context) - fmt.Fprintln(a.Writer) return nerr } - context := NewContext(a, set, set) + context := NewContext(a, set, nil) if err != nil { - fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n") - ShowAppHelp(context) + fmt.Fprintln(a.Writer, "Incorrect Usage.") fmt.Fprintln(a.Writer) + ShowAppHelp(context) return err } @@ -140,20 +137,26 @@ func (a *App) Run(arguments []string) (err error) { return nil } - if checkHelp(context) { + if !a.HideHelp && checkHelp(context) { + ShowAppHelp(context) return nil } - if checkVersion(context) { + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) return nil } if a.After != nil { defer func() { - // err is always nil here. - // There is a check to see if it is non-nil - // just few lines before. 
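The rewrite of this deferred `After` call (continued just below) changes `App.Run` to combine an `After` error with any error already pending, using the `MultiError` type introduced in `cli.go` later in this patch, instead of silently overwriting it. A minimal sketch of the resulting behavior, mirroring the `TestApp_Run_DoesNotOverwriteErrorFromBefore` test added further down:

``` go
package main

import (
	"fmt"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Before = func(c *cli.Context) error { return fmt.Errorf("before error") }
	app.After = func(c *cli.Context) error { return fmt.Errorf("after error") }

	// Run now returns a cli.MultiError; its Error() joins both
	// messages with a newline, so neither failure is lost.
	err := app.Run([]string{"app"})
	fmt.Println(err)
}
```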
- err = a.After(context) + afterErr := a.After(context) + if afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } }() } @@ -198,6 +201,15 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } } + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + // append flags if a.EnableBashCompletion { a.appendFlag(BashCompletionFlag) @@ -208,21 +220,22 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { set.SetOutput(ioutil.Discard) err = set.Parse(ctx.Args().Tail()) nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, ctx.globalSet) + context := NewContext(a, set, ctx) if nerr != nil { fmt.Fprintln(a.Writer, nerr) + fmt.Fprintln(a.Writer) if len(a.Commands) > 0 { ShowSubcommandHelp(context) } else { ShowCommandHelp(ctx, context.Args().First()) } - fmt.Fprintln(a.Writer) return nerr } if err != nil { - fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n") + fmt.Fprintln(a.Writer, "Incorrect Usage.") + fmt.Fprintln(a.Writer) ShowSubcommandHelp(context) return err } @@ -243,10 +256,14 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { if a.After != nil { defer func() { - // err is always nil here. - // There is a check to see if it is non-nil - // just few lines before. - err = a.After(context) + afterErr := a.After(context) + if afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } }() } diff --git a/vendor/github.com/codegangsta/cli/app_test.go b/vendor/github.com/codegangsta/cli/app_test.go index 6143d364..28d8e0f1 100644 --- a/vendor/github.com/codegangsta/cli/app_test.go +++ b/vendor/github.com/codegangsta/cli/app_test.go @@ -1,59 +1,60 @@ -package cli_test +package cli import ( + "bytes" "flag" "fmt" + "io" "os" + "strings" "testing" - - "github.com/codegangsta/cli" ) -func ExampleApp() { +func ExampleApp_Run() { // set args for examples sake os.Args = []string{"greet", "--name", "Jeremy"} - app := cli.NewApp() + app := NewApp() app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + app.Flags = []Flag{ + StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, } - app.Action = func(c *cli.Context) { + app.Action = func(c *Context) { fmt.Printf("Hello %v\n", c.String("name")) } app.Author = "Harrison" app.Email = "harrison@lolwut.com" - app.Authors = []cli.Author{{"Oliver Allen", "oliver@toyshop.com"}} + app.Authors = []Author{Author{Name: "Oliver Allen", Email: "oliver@toyshop.com"}} app.Run(os.Args) // Output: // Hello Jeremy } -func ExampleAppSubcommand() { +func ExampleApp_Run_subcommand() { // set args for examples sake os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} - app := cli.NewApp() + app := NewApp() app.Name = "say" - app.Commands = []cli.Command{ + app.Commands = []Command{ { Name: "hello", - ShortName: "hi", + Aliases: []string{"hi"}, Usage: "use it to see a description", Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ + Subcommands: []Command{ { Name: "english", - ShortName: "en", + Aliases: []string{"en"}, Usage: "sends a greeting in english", Description: "greets someone in english", - Flags: []cli.Flag{ - cli.StringFlag{ + Flags: []Flag{ + StringFlag{ Name: "name", Value: "Bob", Usage: "Name of the person to greet", }, }, - Action: func(c *cli.Context) { + Action: 
func(c *Context) { fmt.Println("Hello,", c.String("name")) }, }, @@ -66,22 +67,22 @@ func ExampleAppSubcommand() { // Hello, Jeremy } -func ExampleAppHelp() { +func ExampleApp_Run_help() { // set args for examples sake os.Args = []string{"greet", "h", "describeit"} - app := cli.NewApp() + app := NewApp() app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + app.Flags = []Flag{ + StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, } - app.Commands = []cli.Command{ + app.Commands = []Command{ { Name: "describeit", - ShortName: "d", + Aliases: []string{"d"}, Usage: "use it to see a description", Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { + Action: func(c *Context) { fmt.Printf("i like to describe things") }, }, @@ -89,36 +90,36 @@ func ExampleAppHelp() { app.Run(os.Args) // Output: // NAME: - // describeit - use it to see a description + // greet describeit - use it to see a description // // USAGE: - // command describeit [arguments...] + // greet describeit [arguments...] // // DESCRIPTION: // This is how we describe describeit the function } -func ExampleAppBashComplete() { +func ExampleApp_Run_bashComplete() { // set args for examples sake os.Args = []string{"greet", "--generate-bash-completion"} - app := cli.NewApp() + app := NewApp() app.Name = "greet" app.EnableBashCompletion = true - app.Commands = []cli.Command{ + app.Commands = []Command{ { Name: "describeit", - ShortName: "d", + Aliases: []string{"d"}, Usage: "use it to see a description", Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { + Action: func(c *Context) { fmt.Printf("i like to describe things") }, }, { Name: "next", Usage: "next example", Description: "more stuff to see when generating bash completion", - Action: func(c *cli.Context) { + Action: func(c *Context) { fmt.Printf("the next example") }, }, @@ -136,8 +137,8 @@ func ExampleAppBashComplete() { func TestApp_Run(t *testing.T) { s := "" - app := cli.NewApp() - app.Action = func(c *cli.Context) { + app := NewApp() + app.Action = func(c *Context) { s = s + c.Args().First() } @@ -161,10 +162,10 @@ var commandAppTests = []struct { } func TestApp_Command(t *testing.T) { - app := cli.NewApp() - fooCommand := cli.Command{Name: "foobar", ShortName: "f"} - batCommand := cli.Command{Name: "batbaz", ShortName: "b"} - app.Commands = []cli.Command{ + app := NewApp() + fooCommand := Command{Name: "foobar", Aliases: []string{"f"}} + batCommand := Command{Name: "batbaz", Aliases: []string{"b"}} + app.Commands = []Command{ fooCommand, batCommand, } @@ -177,18 +178,18 @@ func TestApp_Command(t *testing.T) { func TestApp_CommandWithArgBeforeFlags(t *testing.T) { var parsedOption, firstArg string - app := cli.NewApp() - command := cli.Command{ + app := NewApp() + command := Command{ Name: "cmd", - Flags: []cli.Flag{ - cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, + Flags: []Flag{ + StringFlag{Name: "option", Value: "", Usage: "some option"}, }, - Action: func(c *cli.Context) { + Action: func(c *Context) { parsedOption = c.String("option") firstArg = c.Args().First() }, } - app.Commands = []cli.Command{command} + app.Commands = []Command{command} app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) @@ -197,23 +198,23 @@ func TestApp_CommandWithArgBeforeFlags(t *testing.T) { } func TestApp_RunAsSubcommandParseFlags(t *testing.T) { - var context *cli.Context + var context *Context - a 
:= cli.NewApp() - a.Commands = []cli.Command{ + a := NewApp() + a.Commands = []Command{ { Name: "foo", - Action: func(c *cli.Context) { + Action: func(c *Context) { context = c }, - Flags: []cli.Flag{ - cli.StringFlag{ + Flags: []Flag{ + StringFlag{ Name: "lang", Value: "english", Usage: "language for the greeting", }, }, - Before: func(_ *cli.Context) error { return nil }, + Before: func(_ *Context) error { return nil }, }, } a.Run([]string{"", "foo", "--lang", "spanish", "abcd"}) @@ -226,18 +227,18 @@ func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) { var parsedOption string var args []string - app := cli.NewApp() - command := cli.Command{ + app := NewApp() + command := Command{ Name: "cmd", - Flags: []cli.Flag{ - cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, + Flags: []Flag{ + StringFlag{Name: "option", Value: "", Usage: "some option"}, }, - Action: func(c *cli.Context) { + Action: func(c *Context) { parsedOption = c.String("option") args = c.Args() }, } - app.Commands = []cli.Command{command} + app.Commands = []Command{command} app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"}) @@ -250,14 +251,14 @@ func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) { func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) { var args []string - app := cli.NewApp() - command := cli.Command{ + app := NewApp() + command := Command{ Name: "cmd", - Action: func(c *cli.Context) { + Action: func(c *Context) { args = c.Args() }, } - app.Commands = []cli.Command{command} + app.Commands = []Command{command} app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"}) @@ -269,11 +270,11 @@ func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) { func TestApp_Float64Flag(t *testing.T) { var meters float64 - app := cli.NewApp() - app.Flags = []cli.Flag{ - cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, + app := NewApp() + app.Flags = []Flag{ + Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, } - app.Action = func(c *cli.Context) { + app.Action = func(c *Context) { meters = c.Float64("height") } @@ -286,21 +287,21 @@ func TestApp_ParseSliceFlags(t *testing.T) { var parsedIntSlice []int var parsedStringSlice []string - app := cli.NewApp() - command := cli.Command{ + app := NewApp() + command := Command{ Name: "cmd", - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"}, - cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"}, + Flags: []Flag{ + IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ip addr"}, + StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ports to open"}, }, - Action: func(c *cli.Context) { + Action: func(c *Context) { parsedIntSlice = c.IntSlice("p") parsedStringSlice = c.StringSlice("ip") parsedOption = c.String("option") firstArg = c.Args().First() }, } - app.Commands = []cli.Command{command} + app.Commands = []Command{command} app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"}) @@ -339,8 +340,40 @@ func TestApp_ParseSliceFlags(t *testing.T) { } } +func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) { + var parsedIntSlice []int + var parsedStringSlice []string + + app := NewApp() + command := Command{ + Name: "cmd", + Flags: []Flag{ + IntSliceFlag{Name: "a", Usage: "set numbers"}, + StringSliceFlag{Name: "str", Usage: "set strings"}, + }, + Action: func(c 
*Context) { + parsedIntSlice = c.IntSlice("a") + parsedStringSlice = c.StringSlice("str") + }, + } + app.Commands = []Command{command} + + app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"}) + + var expectedIntSlice = []int{2} + var expectedStringSlice = []string{"A"} + + if parsedIntSlice[0] != expectedIntSlice[0] { + t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0]) + } + + if parsedStringSlice[0] != expectedStringSlice[0] { + t.Errorf("%v does not match %v", parsedStringSlice[0], expectedStringSlice[0]) + } +} + func TestApp_DefaultStdout(t *testing.T) { - app := cli.NewApp() + app := NewApp() if app.Writer != os.Stdout { t.Error("Default output writer not set.") @@ -368,7 +401,7 @@ func (fw *mockWriter) GetWritten() (b []byte) { func TestApp_SetStdout(t *testing.T) { w := &mockWriter{} - app := cli.NewApp() + app := NewApp() app.Name = "test" app.Writer = w @@ -388,9 +421,9 @@ func TestApp_BeforeFunc(t *testing.T) { beforeError := fmt.Errorf("fail") var err error - app := cli.NewApp() + app := NewApp() - app.Before = func(c *cli.Context) error { + app.Before = func(c *Context) error { beforeRun = true s := c.String("opt") if s == "fail" { @@ -400,17 +433,17 @@ func TestApp_BeforeFunc(t *testing.T) { return nil } - app.Commands = []cli.Command{ - cli.Command{ + app.Commands = []Command{ + Command{ Name: "sub", - Action: func(c *cli.Context) { + Action: func(c *Context) { subcommandRun = true }, }, } - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "opt"}, + app.Flags = []Flag{ + StringFlag{Name: "opt"}, } // run with the Before() func succeeding @@ -454,9 +487,9 @@ func TestApp_AfterFunc(t *testing.T) { afterError := fmt.Errorf("fail") var err error - app := cli.NewApp() + app := NewApp() - app.After = func(c *cli.Context) error { + app.After = func(c *Context) error { afterRun = true s := c.String("opt") if s == "fail" { @@ -466,17 +499,17 @@ func TestApp_AfterFunc(t *testing.T) { return nil } - app.Commands = []cli.Command{ - cli.Command{ + app.Commands = []Command{ + Command{ Name: "sub", - Action: func(c *cli.Context) { + Action: func(c *Context) { subcommandRun = true }, }, } - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "opt"}, + app.Flags = []Flag{ + StringFlag{Name: "opt"}, } // run with the After() func succeeding @@ -515,14 +548,14 @@ func TestApp_AfterFunc(t *testing.T) { } func TestAppNoHelpFlag(t *testing.T) { - oldFlag := cli.HelpFlag + oldFlag := HelpFlag defer func() { - cli.HelpFlag = oldFlag + HelpFlag = oldFlag }() - cli.HelpFlag = cli.BoolFlag{} + HelpFlag = BoolFlag{} - app := cli.NewApp() + app := NewApp() err := app.Run([]string{"test", "-h"}) if err != flag.ErrHelp { @@ -531,17 +564,17 @@ func TestAppNoHelpFlag(t *testing.T) { } func TestAppHelpPrinter(t *testing.T) { - oldPrinter := cli.HelpPrinter + oldPrinter := HelpPrinter defer func() { - cli.HelpPrinter = oldPrinter + HelpPrinter = oldPrinter }() var wasCalled = false - cli.HelpPrinter = func(template string, data interface{}) { + HelpPrinter = func(w io.Writer, template string, data interface{}) { wasCalled = true } - app := cli.NewApp() + app := NewApp() app.Run([]string{"-h"}) if wasCalled == false { @@ -550,19 +583,19 @@ func TestAppHelpPrinter(t *testing.T) { } func TestAppVersionPrinter(t *testing.T) { - oldPrinter := cli.VersionPrinter + oldPrinter := VersionPrinter defer func() { - cli.VersionPrinter = oldPrinter + VersionPrinter = oldPrinter }() var wasCalled = false - cli.VersionPrinter = func(c *cli.Context) { + VersionPrinter = func(c *Context) { wasCalled = true }
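Note the new `HelpPrinter` signature exercised in `TestAppHelpPrinter` above: a custom printer now receives the destination `io.Writer` explicitly instead of closing over the app's writer. A minimal sketch of overriding it under the new signature (the output text is illustrative):

``` go
package main

import (
	"fmt"
	"io"

	"github.com/codegangsta/cli"
)

func main() {
	// HelpPrinter is a package-level hook; the io.Writer argument is new
	// in this vendored version of cli.
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "my custom help")
	}

	app := cli.NewApp()
	app.Run([]string{"app", "--help"})
}
```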
- app := cli.NewApp() - ctx := cli.NewContext(app, nil, nil) - cli.ShowVersion(ctx) + app := NewApp() + ctx := NewContext(app, nil, nil) + ShowVersion(ctx) if wasCalled == false { t.Errorf("Version printer expected to be called, but was not") @@ -571,16 +604,16 @@ func TestAppVersionPrinter(t *testing.T) { func TestAppCommandNotFound(t *testing.T) { beforeRun, subcommandRun := false, false - app := cli.NewApp() + app := NewApp() - app.CommandNotFound = func(c *cli.Context, command string) { + app.CommandNotFound = func(c *Context, command string) { beforeRun = true } - app.Commands = []cli.Command{ - cli.Command{ + app.Commands = []Command{ + Command{ Name: "bar", - Action: func(c *cli.Context) { + Action: func(c *Context) { subcommandRun = true }, }, @@ -592,31 +625,336 @@ func TestAppCommandNotFound(t *testing.T) { expect(t, subcommandRun, false) } +func TestGlobalFlag(t *testing.T) { + var globalFlag string + var globalFlagSet bool + app := NewApp() + app.Flags = []Flag{ + StringFlag{Name: "global, g", Usage: "global"}, + } + app.Action = func(c *Context) { + globalFlag = c.GlobalString("global") + globalFlagSet = c.GlobalIsSet("global") + } + app.Run([]string{"command", "-g", "foo"}) + expect(t, globalFlag, "foo") + expect(t, globalFlagSet, true) + +} + func TestGlobalFlagsInSubcommands(t *testing.T) { subcommandRun := false - app := cli.NewApp() + parentFlag := false + app := NewApp() - app.Flags = []cli.Flag{ - cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, + app.Flags = []Flag{ + BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, } - app.Commands = []cli.Command{ - cli.Command{ + app.Commands = []Command{ + Command{ Name: "foo", - Subcommands: []cli.Command{ + Flags: []Flag{ + BoolFlag{Name: "parent, p", Usage: "Parent flag"}, + }, + Subcommands: []Command{ { Name: "bar", - Action: func(c *cli.Context) { + Action: func(c *Context) { if c.GlobalBool("debug") { subcommandRun = true } + if c.GlobalBool("parent") { + parentFlag = true + } }, }, }, }, } - app.Run([]string{"command", "-d", "foo", "bar"}) + app.Run([]string{"command", "-d", "foo", "-p", "bar"}) expect(t, subcommandRun, true) + expect(t, parentFlag, true) +} + +func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) { + var subcommandHelpTopics = [][]string{ + {"command", "foo", "--help"}, + {"command", "foo", "-h"}, + {"command", "foo", "help"}, + } + + for _, flagSet := range subcommandHelpTopics { + t.Logf("==> checking with flags %v", flagSet) + + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + + subCmdBar := Command{ + Name: "bar", + Usage: "does bar things", + } + subCmdBaz := Command{ + Name: "baz", + Usage: "does baz things", + } + cmd := Command{ + Name: "foo", + Description: "descriptive wall of text about how it does foo things", + Subcommands: []Command{subCmdBar, subCmdBaz}, + } + + app.Commands = []Command{cmd} + err := app.Run(flagSet) + + if err != nil { + t.Error(err) + } + + output := buf.String() + t.Logf("output: %q\n", buf.Bytes()) + + if strings.Contains(output, "No help topic for") { + t.Errorf("expect a help topic, got none: \n%q", output) + } + + for _, shouldContain := range []string{ + cmd.Name, cmd.Description, + subCmdBar.Name, subCmdBar.Usage, + subCmdBaz.Name, subCmdBaz.Usage, + } { + if !strings.Contains(output, shouldContain) { + t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output) + } + } + } +} + +func TestApp_Run_SubcommandFullPath(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = 
"command" + subCmd := Command{ + Name: "bar", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "bar", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "command foo bar - does bar things") { + t.Errorf("expected full path to subcommand: %s", output) + } + if !strings.Contains(output, "command foo bar [arguments...]") { + t.Errorf("expected full path to subcommand: %s", output) + } +} + +func TestApp_Run_SubcommandHelpName(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = "command" + subCmd := Command{ + Name: "bar", + HelpName: "custom", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "bar", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "custom - does bar things") { + t.Errorf("expected HelpName for subcommand: %s", output) + } + if !strings.Contains(output, "custom [arguments...]") { + t.Errorf("expected HelpName to subcommand: %s", output) + } +} + +func TestApp_Run_CommandHelpName(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = "command" + subCmd := Command{ + Name: "bar", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + HelpName: "custom", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "bar", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "command foo bar - does bar things") { + t.Errorf("expected full path to subcommand: %s", output) + } + if !strings.Contains(output, "command foo bar [arguments...]") { + t.Errorf("expected full path to subcommand: %s", output) + } +} + +func TestApp_Run_CommandSubcommandHelpName(t *testing.T) { + app := NewApp() + buf := new(bytes.Buffer) + app.Writer = buf + app.Name = "base" + subCmd := Command{ + Name: "bar", + HelpName: "custom", + Usage: "does bar things", + } + cmd := Command{ + Name: "foo", + Description: "foo commands", + Subcommands: []Command{subCmd}, + } + app.Commands = []Command{cmd} + + err := app.Run([]string{"command", "foo", "--help"}) + if err != nil { + t.Error(err) + } + + output := buf.String() + if !strings.Contains(output, "base foo - foo commands") { + t.Errorf("expected full path to subcommand: %s", output) + } + if !strings.Contains(output, "base foo command [command options] [arguments...]") { + t.Errorf("expected full path to subcommand: %s", output) + } +} + +func TestApp_Run_Help(t *testing.T) { + var helpArguments = [][]string{{"boom", "--help"}, {"boom", "-h"}, {"boom", "help"}} + + for _, args := range helpArguments { + buf := new(bytes.Buffer) + + t.Logf("==> checking with arguments %v", args) + + app := NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Writer = buf + app.Action = func(c *Context) { + buf.WriteString("boom I say!") + } + + err := app.Run(args) + if err != nil { + t.Error(err) + } + + output := buf.String() + t.Logf("output: %q\n", buf.Bytes()) + + if !strings.Contains(output, "boom - make an explosive entrance") { + t.Errorf("want help to contain %q, did not: \n%q", "boom - make 
an explosive entrance", output) + } + } +} + +func TestApp_Run_Version(t *testing.T) { + var versionArguments = [][]string{{"boom", "--version"}, {"boom", "-v"}} + + for _, args := range versionArguments { + buf := new(bytes.Buffer) + + t.Logf("==> checking with arguments %v", args) + + app := NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Version = "0.1.0" + app.Writer = buf + app.Action = func(c *Context) { + buf.WriteString("boom I say!") + } + + err := app.Run(args) + if err != nil { + t.Error(err) + } + + output := buf.String() + t.Logf("output: %q\n", buf.Bytes()) + + if !strings.Contains(output, "0.1.0") { + t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output) + } + } +} + +func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) { + app := NewApp() + app.Action = func(c *Context) {} + app.Before = func(c *Context) error { return fmt.Errorf("before error") } + app.After = func(c *Context) error { return fmt.Errorf("after error") } + + err := app.Run([]string{"foo"}) + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + if !strings.Contains(err.Error(), "before error") { + t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) + } + if !strings.Contains(err.Error(), "after error") { + t.Errorf("expected text of error from After method, but got none in \"%v\"", err) + } +} + +func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) { + app := NewApp() + app.Commands = []Command{ + Command{ + Name: "bar", + Before: func(c *Context) error { return fmt.Errorf("before error") }, + After: func(c *Context) error { return fmt.Errorf("after error") }, + }, + } + + err := app.Run([]string{"foo", "bar"}) + if err == nil { + t.Fatalf("expected to receive error from Run, got none") + } + + if !strings.Contains(err.Error(), "before error") { + t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) + } + if !strings.Contains(err.Error(), "after error") { + t.Errorf("expected text of error from After method, but got none in \"%v\"", err) + } } diff --git a/vendor/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/vendor/github.com/codegangsta/cli/autocomplete/bash_autocomplete index 9b55dd99..21a232f1 100644 --- a/vendor/github.com/codegangsta/cli/autocomplete/bash_autocomplete +++ b/vendor/github.com/codegangsta/cli/autocomplete/bash_autocomplete @@ -1,13 +1,14 @@ #!
/bin/bash +: ${PROG:=$(basename ${BASH_SOURCE})} + _cli_bash_autocomplete() { - local cur prev opts base + local cur opts base COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD-1]}" opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) return 0 } - complete -F _cli_bash_autocomplete $PROG \ No newline at end of file + complete -F _cli_bash_autocomplete $PROG diff --git a/vendor/github.com/codegangsta/cli/cli.go b/vendor/github.com/codegangsta/cli/cli.go index b7425458..31dc9124 100644 --- a/vendor/github.com/codegangsta/cli/cli.go +++ b/vendor/github.com/codegangsta/cli/cli.go @@ -17,3 +17,24 @@ // app.Run(os.Args) // } package cli + +import ( + "strings" +) + +type MultiError struct { + Errors []error +} + +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} diff --git a/vendor/github.com/codegangsta/cli/cli_test.go b/vendor/github.com/codegangsta/cli/cli_test.go deleted file mode 100644 index 879a793d..00000000 --- a/vendor/github.com/codegangsta/cli/cli_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package cli_test - -import ( - "os" - - "github.com/codegangsta/cli" -) - -func Example() { - app := cli.NewApp() - app.Name = "todo" - app.Usage = "task list on the command line" - app.Commands = []cli.Command{ - { - Name: "add", - ShortName: "a", - Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - ShortName: "c", - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - }, - } - - app.Run(os.Args) -} - -func ExampleSubcommand() { - app := cli.NewApp() - app.Name = "say" - app.Commands = []cli.Command{ - { - Name: "hello", - ShortName: "hi", - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ - { - Name: "english", - ShortName: "en", - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hello, ", c.String("name")) - }, - }, { - Name: "spanish", - ShortName: "sp", - Usage: "sends a greeting in spanish", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "surname", - Value: "Jones", - Usage: "Surname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hola, ", c.String("surname")) - }, - }, { - Name: "french", - ShortName: "fr", - Usage: "sends a greeting in french", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "nickname", - Value: "Stevie", - Usage: "Nickname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Bonjour, ", c.String("nickname")) - }, - }, - }, - }, { - Name: "bye", - Usage: "says goodbye", - Action: func(c *cli.Context) { - println("bye") - }, - }, - } - - app.Run(os.Args) -} diff --git a/vendor/github.com/codegangsta/cli/command.go b/vendor/github.com/codegangsta/cli/command.go index 07c919a8..824e77ba 100644 --- a/vendor/github.com/codegangsta/cli/command.go +++ b/vendor/github.com/codegangsta/cli/command.go @@ -10,12 +10,16 @@ import ( type Command struct { // The name of the command Name string - // 
short name of the command. Typically one character + // short name of the command. Typically one character (deprecated, use `Aliases`) ShortName string + // A list of aliases for the command + Aliases []string // A short description of the usage of this command Usage string // A longer explanation of how the command works Description string + // A short description of the arguments of this command + ArgsUsage string // The function to call when checking for bash command completions BashComplete func(context *Context) // An action to execute before any sub-subcommands are run, but after the context is ready @@ -34,11 +38,23 @@ type Command struct { SkipFlagParsing bool // Boolean to hide built-in help command HideHelp bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string +} + +// Returns the full name of the command. +// For subcommands this ensures that parent commands are part of the command path +func (c Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") } // Invokes the command given the context, parses ctx.Args() to generate command-specific flags func (c Command) Run(ctx *Context) error { - if len(c.Subcommands) > 0 || c.Before != nil || c.After != nil { return c.startApp(ctx) } @@ -58,40 +74,46 @@ func (c Command) Run(ctx *Context) error { set := flagSet(c.Name, c.Flags) set.SetOutput(ioutil.Discard) - firstFlagIndex := -1 - terminatorIndex := -1 - for index, arg := range ctx.Args() { - if arg == "--" { - terminatorIndex = index - break - } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { - firstFlagIndex = index - } - } - var err error - if firstFlagIndex > -1 && !c.SkipFlagParsing { - args := ctx.Args() - regularArgs := make([]string, len(args[1:firstFlagIndex])) - copy(regularArgs, args[1:firstFlagIndex]) - - var flagArgs []string - if terminatorIndex > -1 { - flagArgs = args[firstFlagIndex:terminatorIndex] - regularArgs = append(regularArgs, args[terminatorIndex:]...) - } else { - flagArgs = args[firstFlagIndex:] + if !c.SkipFlagParsing { + firstFlagIndex := -1 + terminatorIndex := -1 + for index, arg := range ctx.Args() { + if arg == "--" { + terminatorIndex = index + break + } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { + firstFlagIndex = index + } } - err = set.Parse(append(flagArgs, regularArgs...)) + if firstFlagIndex > -1 { + args := ctx.Args() + regularArgs := make([]string, len(args[1:firstFlagIndex])) + copy(regularArgs, args[1:firstFlagIndex]) + + var flagArgs []string + if terminatorIndex > -1 { + flagArgs = args[firstFlagIndex:terminatorIndex] + regularArgs = append(regularArgs, args[terminatorIndex:]...) 
+ } else { + flagArgs = args[firstFlagIndex:] + } + + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } } else { - err = set.Parse(ctx.Args().Tail()) + if c.SkipFlagParsing { + err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) + } } if err != nil { - fmt.Fprint(ctx.App.Writer, "Incorrect Usage.\n\n") - ShowCommandHelp(ctx, c.Name) + fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) return err } @@ -100,10 +122,9 @@ func (c Command) Run(ctx *Context) error { fmt.Fprintln(ctx.App.Writer, nerr) fmt.Fprintln(ctx.App.Writer) ShowCommandHelp(ctx, c.Name) - fmt.Fprintln(ctx.App.Writer) return nerr } - context := NewContext(ctx.App, set, ctx.globalSet) + context := NewContext(ctx.App, set, ctx) if checkCommandCompletions(context, c.Name) { return nil @@ -117,9 +138,24 @@ func (c Command) Run(ctx *Context) error { return nil } +func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) +} + // Returns true if Command.Name or Command.ShortName matches given name func (c Command) HasName(name string) bool { - return c.Name == name || (c.ShortName != "" && c.ShortName == name) + for _, n := range c.Names() { + if n == name { + return true + } + } + return false } func (c Command) startApp(ctx *Context) error { @@ -127,6 +163,12 @@ func (c Command) startApp(ctx *Context) error { // set the name and usage app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + if c.HelpName == "" { + app.HelpName = c.HelpName + } else { + app.HelpName = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + } + if c.Description != "" { app.Usage = c.Description } else { @@ -141,6 +183,13 @@ func (c Command) startApp(ctx *Context) error { app.Flags = c.Flags app.HideHelp = c.HideHelp + app.Version = ctx.App.Version + app.HideVersion = ctx.App.HideVersion + app.Compiled = ctx.App.Compiled + app.Author = ctx.App.Author + app.Email = ctx.App.Email + app.Writer = ctx.App.Writer + // bash completion app.EnableBashCompletion = ctx.App.EnableBashCompletion if c.BashComplete != nil { @@ -156,5 +205,12 @@ func (c Command) startApp(ctx *Context) error { app.Action = helpSubcommand.Action } + var newCmds []Command + for _, cc := range app.Commands { + cc.commandNamePath = []string{c.Name, cc.Name} + newCmds = append(newCmds, cc) + } + app.Commands = newCmds + return app.RunAsSubcommand(ctx) } diff --git a/vendor/github.com/codegangsta/cli/command_test.go b/vendor/github.com/codegangsta/cli/command_test.go index c0f556ad..dd9fc87f 100644 --- a/vendor/github.com/codegangsta/cli/command_test.go +++ b/vendor/github.com/codegangsta/cli/command_test.go @@ -1,49 +1,43 @@ -package cli_test +package cli import ( + "errors" "flag" "testing" - - "github.com/codegangsta/cli" ) -func TestCommandDoNotIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah", "-break"} - set.Parse(test) - - c := cli.NewContext(app, set, set) - - command := cli.Command{ - Name: "test-cmd", - ShortName: "tc", - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) {}, +func TestCommandFlagParsing(t *testing.T) { + cases := []struct { + testArgs []string + skipFlagParsing bool + expectedErr error + }{ + {[]string{"blah", "blah", "-break"}, false, errors.New("flag provided but not defined: -break")}, // Test normal "not ignoring flags" flow + 
{[]string{"blah", "blah"}, true, nil}, // Test SkipFlagParsing without any args that look like flags + {[]string{"blah", "-break"}, true, nil}, // Test SkipFlagParsing with random flag arg + {[]string{"blah", "-help"}, true, nil}, // Test SkipFlagParsing with "special" help flag arg } - err := command.Run(c) - expect(t, err.Error(), "flag provided but not defined: -break") -} + for _, c := range cases { + app := NewApp() + set := flag.NewFlagSet("test", 0) + set.Parse(c.testArgs) -func TestCommandIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah"} - set.Parse(test) + context := NewContext(app, set, nil) - c := cli.NewContext(app, set, set) + command := Command{ + Name: "test-cmd", + Aliases: []string{"tc"}, + Usage: "this is for testing", + Description: "testing", + Action: func(_ *Context) {}, + } - command := cli.Command{ - Name: "test-cmd", - ShortName: "tc", - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) {}, - SkipFlagParsing: true, + command.SkipFlagParsing = c.skipFlagParsing + + err := command.Run(context) + + expect(t, err, c.expectedErr) + expect(t, []string(context.Args()), c.testArgs) } - err := command.Run(c) - - expect(t, err, nil) } diff --git a/vendor/github.com/codegangsta/cli/context.go b/vendor/github.com/codegangsta/cli/context.go index 37221bdc..f541f41c 100644 --- a/vendor/github.com/codegangsta/cli/context.go +++ b/vendor/github.com/codegangsta/cli/context.go @@ -16,14 +16,14 @@ type Context struct { App *App Command Command flagSet *flag.FlagSet - globalSet *flag.FlagSet setFlags map[string]bool globalSetFlags map[string]bool + parentContext *Context } // Creates a new context. For use when invoking an App or Command action.
-func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context { - return &Context{App: app, flagSet: set, globalSet: globalSet} +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + return &Context{App: app, flagSet: set, parentContext: parentCtx} } // Looks up the value of a local int flag, returns 0 if no int flag exists @@ -73,37 +73,58 @@ func (c *Context) Generic(name string) interface{} { // Looks up the value of a global int flag, returns 0 if no int flag exists func (c *Context) GlobalInt(name string) int { - return lookupInt(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 } // Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists func (c *Context) GlobalDuration(name string) time.Duration { - return lookupDuration(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 } // Looks up the value of a global bool flag, returns false if no bool flag exists func (c *Context) GlobalBool(name string) bool { - return lookupBool(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false } // Looks up the value of a global string flag, returns "" if no string flag exists func (c *Context) GlobalString(name string) string { - return lookupString(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" } // Looks up the value of a global string slice flag, returns nil if no string slice flag exists func (c *Context) GlobalStringSlice(name string) []string { - return lookupStringSlice(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil } // Looks up the value of a global int slice flag, returns nil if no int slice flag exists func (c *Context) GlobalIntSlice(name string) []int { - return lookupIntSlice(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil } // Looks up the value of a global generic flag, returns nil if no generic flag exists func (c *Context) GlobalGeneric(name string) interface{} { - return lookupGeneric(name, c.globalSet) + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil } // Returns the number of flags set @@ -126,11 +147,17 @@ func (c *Context) IsSet(name string) bool { func (c *Context) GlobalIsSet(name string) bool { if c.globalSetFlags == nil { c.globalSetFlags = make(map[string]bool) - c.globalSet.Visit(func(f *flag.Flag) { - c.globalSetFlags[f.Name] = true - }) + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext { + ctx.flagSet.Visit(func(f *flag.Flag) { + c.globalSetFlags[f.Name] = true + }) + } } - return c.globalSetFlags[name] == true + return c.globalSetFlags[name] } // Returns a slice of flag names used in this context. @@ -157,6 +184,11 @@ func (c *Context) GlobalFlagNames() (names []string) { return } +// Returns the parent context, if any +func (c *Context) Parent() *Context { + return c.parentContext +} + type Args []string // Returns the command line arguments associated with the context. 
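Taken together, these `context.go` changes replace the stored global flag set with a parent chain: each `Global*` accessor now walks `parentContext` links (via the `lookupGlobalFlagSet` helper added in the next hunk) until some ancestor's flag set defines the requested flag. A minimal sketch of the new lookup, following the same pattern as the updated tests (flag and set names hypothetical):

``` go
package main

import (
	"flag"
	"fmt"

	"github.com/codegangsta/cli"
)

func main() {
	// Top-level flags live on the root context's own flag set.
	globalSet := flag.NewFlagSet("app", 0)
	globalSet.String("lang", "english", "doc")
	globalSet.Parse([]string{"--lang", "spanish"})
	rootCtx := cli.NewContext(nil, globalSet, nil)

	// A subcommand context now points at its parent context
	// rather than at a copy of the global set.
	subSet := flag.NewFlagSet("sub", 0)
	subCtx := cli.NewContext(nil, subSet, rootCtx)

	// GlobalString walks subCtx -> rootCtx and finds "lang" there.
	fmt.Println(subCtx.GlobalString("lang")) // spanish
	fmt.Println(subCtx.Parent() == rootCtx)  // true
}
```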
@@ -201,6 +233,18 @@ func (a Args) Swap(from, to int) error { return nil } +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + func lookupInt(name string, set *flag.FlagSet) int { f := set.Lookup(name) if f != nil { diff --git a/vendor/github.com/codegangsta/cli/context_test.go b/vendor/github.com/codegangsta/cli/context_test.go index d4a1877f..7f8e9289 100644 --- a/vendor/github.com/codegangsta/cli/context_test.go +++ b/vendor/github.com/codegangsta/cli/context_test.go @@ -1,11 +1,9 @@ -package cli_test +package cli import ( "flag" "testing" "time" - - "github.com/codegangsta/cli" ) func TestNewContext(t *testing.T) { @@ -13,8 +11,9 @@ func TestNewContext(t *testing.T) { set.Int("myflag", 12, "doc") globalSet := flag.NewFlagSet("test", 0) globalSet.Int("myflag", 42, "doc") - command := cli.Command{Name: "mycommand"} - c := cli.NewContext(nil, set, globalSet) + globalCtx := NewContext(nil, globalSet, nil) + command := Command{Name: "mycommand"} + c := NewContext(nil, set, globalCtx) c.Command = command expect(t, c.Int("myflag"), 12) expect(t, c.GlobalInt("myflag"), 42) @@ -24,42 +23,42 @@ func TestNewContext(t *testing.T) { func TestContext_Int(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Int("myflag", 12, "doc") - c := cli.NewContext(nil, set, set) + c := NewContext(nil, set, nil) expect(t, c.Int("myflag"), 12) } func TestContext_Duration(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Duration("myflag", time.Duration(12*time.Second), "doc") - c := cli.NewContext(nil, set, set) + c := NewContext(nil, set, nil) expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) } func TestContext_String(t *testing.T) { set := flag.NewFlagSet("test", 0) set.String("myflag", "hello world", "doc") - c := cli.NewContext(nil, set, set) + c := NewContext(nil, set, nil) expect(t, c.String("myflag"), "hello world") } func TestContext_Bool(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) + c := NewContext(nil, set, nil) expect(t, c.Bool("myflag"), false) } func TestContext_BoolT(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Bool("myflag", true, "doc") - c := cli.NewContext(nil, set, set) + c := NewContext(nil, set, nil) expect(t, c.BoolT("myflag"), true) } func TestContext_Args(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) + c := NewContext(nil, set, nil) set.Parse([]string{"--myflag", "bat", "baz"}) expect(t, len(c.Args()), 2) expect(t, c.Bool("myflag"), true) @@ -71,7 +70,8 @@ func TestContext_IsSet(t *testing.T) { set.String("otherflag", "hello world", "doc") globalSet := flag.NewFlagSet("test", 0) globalSet.Bool("myflagGlobal", true, "doc") - c := cli.NewContext(nil, set, globalSet) + globalCtx := NewContext(nil, globalSet, nil) + c := NewContext(nil, set, globalCtx) set.Parse([]string{"--myflag", "bat", "baz"}) globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) expect(t, c.IsSet("myflag"), true) @@ -87,7 +87,8 @@ func TestContext_GlobalIsSet(t *testing.T) { globalSet := flag.NewFlagSet("test", 0) globalSet.Bool("myflagGlobal", true, "doc") globalSet.Bool("myflagGlobalUnset", true, "doc") - c := cli.NewContext(nil, set, globalSet) + globalCtx := NewContext(nil, globalSet, nil) + c := NewContext(nil, set, 
globalCtx) set.Parse([]string{"--myflag", "bat", "baz"}) globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) expect(t, c.GlobalIsSet("myflag"), false) @@ -104,7 +105,8 @@ func TestContext_NumFlags(t *testing.T) { set.String("otherflag", "hello world", "doc") globalSet := flag.NewFlagSet("test", 0) globalSet.Bool("myflagGlobal", true, "doc") - c := cli.NewContext(nil, set, globalSet) + globalCtx := NewContext(nil, globalSet, nil) + c := NewContext(nil, set, globalCtx) set.Parse([]string{"--myflag", "--otherflag=foo"}) globalSet.Parse([]string{"--myflagGlobal"}) expect(t, c.NumFlags(), 2) diff --git a/vendor/github.com/codegangsta/cli/flag.go b/vendor/github.com/codegangsta/cli/flag.go index 25115866..9b22d7f1 100644 --- a/vendor/github.com/codegangsta/cli/flag.go +++ b/vendor/github.com/codegangsta/cli/flag.go @@ -99,21 +99,27 @@ func (f GenericFlag) getName() string { return f.Name } +// StringSlice is an opaque type for []string to satisfy flag.Value type StringSlice []string +// Set appends the string value to the list of values func (f *StringSlice) Set(value string) error { *f = append(*f, value) return nil } +// String returns a readable representation of this value (for usage defaults) func (f *StringSlice) String() string { return fmt.Sprintf("%s", *f) } +// Value returns the slice of strings set by this flag func (f *StringSlice) Value() []string { return *f } +// StringSliceFlag is a string flag that can be specified multiple times on the +// command-line type StringSliceFlag struct { Name string Value *StringSlice @@ -121,12 +127,14 @@ type StringSliceFlag struct { EnvVar string } +// String returns the usage func (f StringSliceFlag) String() string { firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") pref := prefixFor(firstName) return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) } +// Apply populates the flag given the flag set and environment func (f StringSliceFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { @@ -144,6 +152,9 @@ func (f StringSliceFlag) Apply(set *flag.FlagSet) { } eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } set.Var(f.Value, name, f.Usage) }) } @@ -152,10 +163,11 @@ func (f StringSliceFlag) getName() string { return f.Name } +// IntSlice is an opaque type for []int to satisfy flag.Value type IntSlice []int +// Set parses the value into an integer and appends it to the list of values func (f *IntSlice) Set(value string) error { - tmp, err := strconv.Atoi(value) if err != nil { return err @@ -165,14 +177,18 @@ func (f *IntSlice) Set(value string) error { return nil } +// String returns a readable representation of this value (for usage defaults) func (f *IntSlice) String() string { return fmt.Sprintf("%d", *f) } +// Value returns the slice of ints set by this flag func (f *IntSlice) Value() []int { return *f } +// IntSliceFlag is an int flag that can be specified multiple times on the +// command-line type IntSliceFlag struct { Name string Value *IntSlice @@ -180,12 +196,14 @@ type IntSliceFlag struct { EnvVar string } +// String returns the usage func (f IntSliceFlag) String() string { firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") pref := prefixFor(firstName) return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) } +// Apply populates the flag given the
flag set and environment func (f IntSliceFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { @@ -206,6 +224,9 @@ } eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } set.Var(f.Value, name, f.Usage) }) } @@ -214,16 +235,20 @@ func (f IntSliceFlag) getName() string { return f.Name } +// BoolFlag is a switch that defaults to false type BoolFlag struct { - Name string - Usage string - EnvVar string + Name string + Usage string + EnvVar string + Destination *bool } +// String returns a readable representation of this value (for usage defaults) func (f BoolFlag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) } +// Apply populates the flag given the flag set and environment func (f BoolFlag) Apply(set *flag.FlagSet) { val := false if f.EnvVar != "" { @@ -240,6 +265,10 @@ } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } set.Bool(name, val, f.Usage) }) } @@ -248,16 +277,21 @@ func (f BoolFlag) getName() string { return f.Name } +// BoolTFlag represents a boolean flag that is true by default, but can +// still be set to false by --some-flag=false type BoolTFlag struct { - Name string - Usage string - EnvVar string + Name string + Usage string + EnvVar string + Destination *bool } +// String returns a readable representation of this value (for usage defaults) func (f BoolTFlag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) } +// Apply populates the flag given the flag set and environment func (f BoolTFlag) Apply(set *flag.FlagSet) { val := true if f.EnvVar != "" { @@ -274,6 +308,10 @@ } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } set.Bool(name, val, f.Usage) }) } @@ -282,13 +320,16 @@ func (f BoolTFlag) getName() string { return f.Name } +// StringFlag represents a flag that takes a string value type StringFlag struct { - Name string - Value string - Usage string - EnvVar string + Name string + Value string + Usage string + EnvVar string + Destination *string } +// String returns the usage func (f StringFlag) String() string { var fmtString string fmtString = "%s %v\t%v" @@ -302,6 +343,7 @@ return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage)) } +// Apply populates the flag given the flag set and environment func (f StringFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { @@ -314,6 +356,10 @@ } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } set.String(name, f.Value, f.Usage) }) } @@ -322,17 +368,22 @@ func (f StringFlag) getName() string { return f.Name } +// IntFlag is a flag that takes an integer +// Errors if the value provided cannot be parsed type IntFlag struct { - Name string - Value int - Usage string - EnvVar string + Name string + Value int + Usage string + EnvVar string + Destination *int } +// String returns the usage func (f IntFlag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v",
prefixedNames(f.Name), f.Value, f.Usage)) } +// Apply populates the flag given the flag set and environment func (f IntFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { @@ -348,6 +399,10 @@ } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + return + } set.Int(name, f.Value, f.Usage) }) } @@ -356,17 +411,22 @@ func (f IntFlag) getName() string { return f.Name } +// DurationFlag is a flag that takes a duration specified in Go's duration +// format: https://golang.org/pkg/time/#ParseDuration type DurationFlag struct { - Name string - Value time.Duration - Usage string - EnvVar string + Name string + Value time.Duration + Usage string + EnvVar string + Destination *time.Duration } +// String returns a readable representation of this value (for usage defaults) func (f DurationFlag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) } +// Apply populates the flag given the flag set and environment func (f DurationFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { @@ -382,6 +442,10 @@ } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } set.Duration(name, f.Value, f.Usage) }) } @@ -390,17 +454,22 @@ func (f DurationFlag) getName() string { return f.Name } +// Float64Flag is a flag that takes a float value +// Errors if the value provided cannot be parsed type Float64Flag struct { - Name string - Value float64 - Usage string - EnvVar string + Name string + Value float64 + Usage string + EnvVar string + Destination *float64 } +// String returns the usage func (f Float64Flag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) } +// Apply populates the flag given the flag set and environment func (f Float64Flag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { @@ -415,6 +484,10 @@ } eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + return + } set.Float64(name, f.Value, f.Usage) }) } diff --git a/vendor/github.com/codegangsta/cli/flag_test.go b/vendor/github.com/codegangsta/cli/flag_test.go index f0f096a2..4462d3fe 100644 --- a/vendor/github.com/codegangsta/cli/flag_test.go +++ b/vendor/github.com/codegangsta/cli/flag_test.go @@ -1,4 +1,4 @@ -package cli_test +package cli import ( "fmt" @@ -6,8 +6,6 @@ import ( "reflect" "strings" "testing" - - "github.com/codegangsta/cli" ) var boolFlagTests = []struct { @@ -21,7 +19,7 @@ var boolFlagTests = []struct { func TestBoolFlagHelpOutput(t *testing.T) { for _, test := range boolFlagTests { - flag := cli.BoolFlag{Name: test.name} + flag := BoolFlag{Name: test.name} output := flag.String() if output != test.expected { @@ -44,7 +42,7 @@ var stringFlagTests = []struct { func TestStringFlagHelpOutput(t *testing.T) { for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value} + flag := StringFlag{Name: test.name, Value: test.value} output := flag.String() if output != test.expected { @@ -57,7 +55,7 @@ func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { os.Clearenv()
os.Setenv("APP_FOO", "derp") for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} + flag := StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} output := flag.String() if !strings.HasSuffix(output, " [$APP_FOO]") { @@ -68,26 +66,26 @@ func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { var stringSliceFlagTests = []struct { name string - value *cli.StringSlice + value *StringSlice expected string }{ - {"help", func() *cli.StringSlice { - s := &cli.StringSlice{} + {"help", func() *StringSlice { + s := &StringSlice{} s.Set("") return s }(), "--help [--help option --help option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} + {"h", func() *StringSlice { + s := &StringSlice{} s.Set("") return s }(), "-h [-h option -h option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} + {"h", func() *StringSlice { + s := &StringSlice{} s.Set("") return s }(), "-h [-h option -h option]\t"}, - {"test", func() *cli.StringSlice { - s := &cli.StringSlice{} + {"test", func() *StringSlice { + s := &StringSlice{} s.Set("Something") return s }(), "--test [--test option --test option]\t"}, @@ -96,7 +94,7 @@ var stringSliceFlagTests = []struct { func TestStringSliceFlagHelpOutput(t *testing.T) { for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value} + flag := StringSliceFlag{Name: test.name, Value: test.value} output := flag.String() if output != test.expected { @@ -109,7 +107,7 @@ func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { os.Clearenv() os.Setenv("APP_QWWX", "11,4") for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} + flag := StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} output := flag.String() if !strings.HasSuffix(output, " [$APP_QWWX]") { @@ -129,7 +127,7 @@ var intFlagTests = []struct { func TestIntFlagHelpOutput(t *testing.T) { for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name} + flag := IntFlag{Name: test.name} output := flag.String() if output != test.expected { @@ -142,7 +140,7 @@ func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { os.Clearenv() os.Setenv("APP_BAR", "2") for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name, EnvVar: "APP_BAR"} + flag := IntFlag{Name: test.name, EnvVar: "APP_BAR"} output := flag.String() if !strings.HasSuffix(output, " [$APP_BAR]") { @@ -162,7 +160,7 @@ var durationFlagTests = []struct { func TestDurationFlagHelpOutput(t *testing.T) { for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name} + flag := DurationFlag{Name: test.name} output := flag.String() if output != test.expected { @@ -175,7 +173,7 @@ func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { os.Clearenv() os.Setenv("APP_BAR", "2h3m6s") for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name, EnvVar: "APP_BAR"} + flag := DurationFlag{Name: test.name, EnvVar: "APP_BAR"} output := flag.String() if !strings.HasSuffix(output, " [$APP_BAR]") { @@ -186,14 +184,14 @@ func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { var intSliceFlagTests = []struct { name string - value *cli.IntSlice + value *IntSlice expected string }{ - {"help", &cli.IntSlice{}, "--help [--help option --help option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"test", func() 
*cli.IntSlice { - i := &cli.IntSlice{} + {"help", &IntSlice{}, "--help [--help option --help option]\t"}, + {"h", &IntSlice{}, "-h [-h option -h option]\t"}, + {"h", &IntSlice{}, "-h [-h option -h option]\t"}, + {"test", func() *IntSlice { + i := &IntSlice{} i.Set("9") return i }(), "--test [--test option --test option]\t"}, @@ -202,7 +200,7 @@ var intSliceFlagTests = []struct { func TestIntSliceFlagHelpOutput(t *testing.T) { for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value} + flag := IntSliceFlag{Name: test.name, Value: test.value} output := flag.String() if output != test.expected { @@ -215,7 +213,7 @@ func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { os.Clearenv() os.Setenv("APP_SMURF", "42,3") for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} + flag := IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} output := flag.String() if !strings.HasSuffix(output, " [$APP_SMURF]") { @@ -235,7 +233,7 @@ var float64FlagTests = []struct { func TestFloat64FlagHelpOutput(t *testing.T) { for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name} + flag := Float64Flag{Name: test.name} output := flag.String() if output != test.expected { @@ -248,7 +246,7 @@ func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { os.Clearenv() os.Setenv("APP_BAZ", "99.4") for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} + flag := Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} output := flag.String() if !strings.HasSuffix(output, " [$APP_BAZ]") { @@ -259,7 +257,7 @@ func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { var genericFlagTests = []struct { name string - value cli.Generic + value Generic expected string }{ {"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"}, @@ -269,7 +267,7 @@ var genericFlagTests = []struct { func TestGenericFlagHelpOutput(t *testing.T) { for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} + flag := GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} output := flag.String() if output != test.expected { @@ -282,7 +280,7 @@ func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { os.Clearenv() os.Setenv("APP_ZAP", "3") for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} + flag := GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} output := flag.String() if !strings.HasSuffix(output, " [$APP_ZAP]") { @@ -292,11 +290,11 @@ func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { } func TestParseMultiString(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "serve, s"}, + (&App{ + Flags: []Flag{ + StringFlag{Name: "serve, s"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.String("serve") != "10" { t.Errorf("main name not set") } @@ -307,14 +305,32 @@ func TestParseMultiString(t *testing.T) { }).Run([]string{"run", "-s", "10"}) } +func TestParseDestinationString(t *testing.T) { + var dest string + a := App{ + Flags: []Flag{ + StringFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) { + if dest != "10" { + t.Errorf("expected destination String 10") + } + }, + } + a.Run([]string{"run", "--dest", "10"}) +} + func TestParseMultiStringFromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - 
cli.StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, + (&App{ + Flags: []Flag{ + StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.String("count") != "20" { t.Errorf("main name not set") } @@ -328,11 +344,11 @@ func TestParseMultiStringFromEnv(t *testing.T) { func TestParseMultiStringFromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, + (&App{ + Flags: []Flag{ + StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.String("count") != "20" { t.Errorf("main name not set") } @@ -344,11 +360,11 @@ func TestParseMultiStringFromEnvCascade(t *testing.T) { } func TestParseMultiStringSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}}, + (&App{ + Flags: []Flag{ + StringSliceFlag{Name: "serve, s", Value: &StringSlice{}}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { t.Errorf("main name not set") } @@ -363,11 +379,11 @@ func TestParseMultiStringSliceFromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_INTERVALS", "20,30,40") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "APP_INTERVALS"}, + (&App{ + Flags: []Flag{ + StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "APP_INTERVALS"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { t.Errorf("main name not set from env") } @@ -382,11 +398,11 @@ func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_INTERVALS", "20,30,40") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, + (&App{ + Flags: []Flag{ + StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { t.Errorf("main name not set from env") } @@ -398,11 +414,11 @@ func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { } func TestParseMultiInt(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "serve, s"}, + a := App{ + Flags: []Flag{ + IntFlag{Name: "serve, s"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Int("serve") != 10 { t.Errorf("main name not set") } @@ -414,14 +430,32 @@ func TestParseMultiInt(t *testing.T) { a.Run([]string{"run", "-s", "10"}) } +func TestParseDestinationInt(t *testing.T) { + var dest int + a := App{ + Flags: []Flag{ + IntFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) { + if dest != 10 { + t.Errorf("expected destination Int 10") + } + }, + } + a.Run([]string{"run", "--dest", "10"}) +} + func TestParseMultiIntFromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + a := App{ + Flags: []Flag{ + IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, }, - Action: func(ctx *cli.Context) { + Action: 
func(ctx *Context) { if ctx.Int("timeout") != 10 { t.Errorf("main name not set") } @@ -436,11 +470,11 @@ func TestParseMultiIntFromEnv(t *testing.T) { func TestParseMultiIntFromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, + a := App{ + Flags: []Flag{ + IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Int("timeout") != 10 { t.Errorf("main name not set") } @@ -453,11 +487,11 @@ func TestParseMultiIntFromEnvCascade(t *testing.T) { } func TestParseMultiIntSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "serve, s", Value: &cli.IntSlice{}}, + (&App{ + Flags: []Flag{ + IntSliceFlag{Name: "serve, s", Value: &IntSlice{}}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { t.Errorf("main name not set") } @@ -472,11 +506,11 @@ func TestParseMultiIntSliceFromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_INTERVALS", "20,30,40") - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "APP_INTERVALS"}, + (&App{ + Flags: []Flag{ + IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "APP_INTERVALS"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { t.Errorf("main name not set from env") } @@ -491,11 +525,11 @@ func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_INTERVALS", "20,30,40") - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, + (&App{ + Flags: []Flag{ + IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { t.Errorf("main name not set from env") } @@ -507,11 +541,11 @@ func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { } func TestParseMultiFloat64(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "serve, s"}, + a := App{ + Flags: []Flag{ + Float64Flag{Name: "serve, s"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Float64("serve") != 10.2 { t.Errorf("main name not set") } @@ -523,14 +557,32 @@ func TestParseMultiFloat64(t *testing.T) { a.Run([]string{"run", "-s", "10.2"}) } +func TestParseDestinationFloat64(t *testing.T) { + var dest float64 + a := App{ + Flags: []Flag{ + Float64Flag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) { + if dest != 10.2 { + t.Errorf("expected destination Float64 10.2") + } + }, + } + a.Run([]string{"run", "--dest", "10.2"}) +} + func TestParseMultiFloat64FromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + a := App{ + Flags: []Flag{ + Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Float64("timeout") != 15.5 { t.Errorf("main name not set") } @@ -545,11 +597,11 @@ func TestParseMultiFloat64FromEnv(t *testing.T) { func 
TestParseMultiFloat64FromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, + a := App{ + Flags: []Flag{ + Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Float64("timeout") != 15.5 { t.Errorf("main name not set") } @@ -562,11 +614,11 @@ func TestParseMultiFloat64FromEnvCascade(t *testing.T) { } func TestParseMultiBool(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "serve, s"}, + a := App{ + Flags: []Flag{ + BoolFlag{Name: "serve, s"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Bool("serve") != true { t.Errorf("main name not set") } @@ -578,14 +630,32 @@ func TestParseMultiBool(t *testing.T) { a.Run([]string{"run", "--serve"}) } +func TestParseDestinationBool(t *testing.T) { + var dest bool + a := App{ + Flags: []Flag{ + BoolFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) { + if dest != true { + t.Errorf("expected destination Bool true") + } + }, + } + a.Run([]string{"run", "--dest"}) +} + func TestParseMultiBoolFromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, + a := App{ + Flags: []Flag{ + BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Bool("debug") != true { t.Errorf("main name not set from env") } @@ -600,11 +670,11 @@ func TestParseMultiBoolFromEnv(t *testing.T) { func TestParseMultiBoolFromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, + a := App{ + Flags: []Flag{ + BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.Bool("debug") != true { t.Errorf("main name not set from env") } @@ -617,11 +687,11 @@ func TestParseMultiBoolFromEnvCascade(t *testing.T) { } func TestParseMultiBoolT(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "serve, s"}, + a := App{ + Flags: []Flag{ + BoolTFlag{Name: "serve, s"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.BoolT("serve") != true { t.Errorf("main name not set") } @@ -633,14 +703,32 @@ func TestParseMultiBoolT(t *testing.T) { a.Run([]string{"run", "--serve"}) } +func TestParseDestinationBoolT(t *testing.T) { + var dest bool + a := App{ + Flags: []Flag{ + BoolTFlag{ + Name: "dest", + Destination: &dest, + }, + }, + Action: func(ctx *Context) { + if dest != true { + t.Errorf("expected destination BoolT true") + } + }, + } + a.Run([]string{"run", "--dest"}) +} + func TestParseMultiBoolTFromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, + a := App{ + Flags: []Flag{ + BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.BoolT("debug") != false { t.Errorf("main name not set from env") } @@ -655,11 +743,11 @@ func TestParseMultiBoolTFromEnv(t *testing.T) { func TestParseMultiBoolTFromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_DEBUG", "0") - a := 
cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, + a := App{ + Flags: []Flag{ + BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if ctx.BoolT("debug") != false { t.Errorf("main name not set from env") } @@ -690,11 +778,11 @@ func (p *Parser) String() string { } func TestParseGeneric(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}}, + a := App{ + Flags: []Flag{ + GenericFlag{Name: "serve, s", Value: &Parser{}}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { t.Errorf("main name not set") } @@ -709,11 +797,11 @@ func TestParseGeneric(t *testing.T) { func TestParseGenericFromEnv(t *testing.T) { os.Clearenv() os.Setenv("APP_SERVE", "20,30") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, + a := App{ + Flags: []Flag{ + GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { t.Errorf("main name not set from env") } @@ -728,11 +816,11 @@ func TestParseGenericFromEnv(t *testing.T) { func TestParseGenericFromEnvCascade(t *testing.T) { os.Clearenv() os.Setenv("APP_FOO", "99,2000") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, + a := App{ + Flags: []Flag{ + GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, }, - Action: func(ctx *cli.Context) { + Action: func(ctx *Context) { if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) { t.Errorf("value not set from env") } diff --git a/vendor/github.com/codegangsta/cli/help.go b/vendor/github.com/codegangsta/cli/help.go index 8d176556..a246f63a 100644 --- a/vendor/github.com/codegangsta/cli/help.go +++ b/vendor/github.com/codegangsta/cli/help.go @@ -1,6 +1,12 @@ package cli -import "fmt" +import ( + "fmt" + "io" + "strings" + "text/tabwriter" + "text/template" +) // The text template for the Default help topic. // cli.go uses text/template to render templates. You can @@ -9,30 +15,33 @@ var AppHelpTemplate = `NAME: {{.Name}} - {{.Usage}} USAGE: - {{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...] - + {{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if .Version}} VERSION: {{.Version}} - -AUTHOR(S): - {{range .Authors}}{{ . }} {{end}} - + {{end}}{{if len .Authors}} +AUTHOR(S): + {{range .Authors}}{{ . }}{{end}} + {{end}}{{if .Commands}} COMMANDS: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} + {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} + {{end}}{{end}}{{if .Flags}} GLOBAL OPTIONS: {{range .Flags}}{{.}} - {{end}}{{end}} + {{end}}{{end}}{{if .Copyright }} +COPYRIGHT: + {{.Copyright}} + {{end}} ` // The text template for the command help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. 
var CommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} + {{.HelpName}} - {{.Usage}} USAGE: - command {{.Name}}{{if .Flags}} [command options]{{end}} [arguments...]{{if .Description}} + {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}} DESCRIPTION: {{.Description}}{{end}}{{if .Flags}} @@ -46,13 +55,13 @@ OPTIONS: // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var SubcommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} + {{.HelpName}} - {{.Usage}} USAGE: - {{.Name}} command{{if .Flags}} [command options]{{end}} [arguments...] + {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} COMMANDS: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} + {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} {{end}}{{if .Flags}} OPTIONS: {{range .Flags}}{{.}} @@ -61,8 +70,9 @@ OPTIONS: var helpCommand = Command{ Name: "help", - ShortName: "h", + Aliases: []string{"h"}, Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", Action: func(c *Context) { args := c.Args() if args.Present() { @@ -75,8 +85,9 @@ var helpCommand = Command{ var helpSubcommand = Command{ Name: "help", - ShortName: "h", + Aliases: []string{"h"}, Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", Action: func(c *Context) { args := c.Args() if args.Present() { @@ -87,47 +98,46 @@ var helpSubcommand = Command{ }, } -// Prints help for the App -type helpPrinter func(templ string, data interface{}) +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) -var HelpPrinter helpPrinter = nil +var HelpPrinter helpPrinter = printHelp // Prints version for the App var VersionPrinter = printVersion func ShowAppHelp(c *Context) { - HelpPrinter(AppHelpTemplate, c.App) + HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) } // Prints the list of subcommands as the default app completion method func DefaultAppComplete(c *Context) { for _, command := range c.App.Commands { - fmt.Fprintln(c.App.Writer, command.Name) - if command.ShortName != "" { - fmt.Fprintln(c.App.Writer, command.ShortName) + for _, name := range command.Names() { + fmt.Fprintln(c.App.Writer, name) } } } // Prints help for the given command -func ShowCommandHelp(c *Context, command string) { +func ShowCommandHelp(ctx *Context, command string) { // show the subcommand help for a command with subcommands if command == "" { - HelpPrinter(SubcommandHelpTemplate, c.App) + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) return } - for _, c := range c.App.Commands { + for _, c := range ctx.App.Commands { if c.HasName(command) { - HelpPrinter(CommandHelpTemplate, c) + HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) return } } - if c.App.CommandNotFound != nil { - c.App.CommandNotFound(c, command) + if ctx.App.CommandNotFound != nil { + ctx.App.CommandNotFound(ctx, command) } else { - fmt.Fprintf(c.App.Writer, "No help topic for '%v'\n", command) + fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) } } @@ -161,22 +171,42 @@ func ShowCommandCompletions(ctx *Context, command string) { } } -func checkVersion(c *Context) bool { - if c.GlobalBool("version") { - ShowVersion(c) - return true +func printHelp(out io.Writer, templ string, data interface{}) { + funcMap := template.FuncMap{ + "join": 
strings.Join, } - return false + w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + err := t.Execute(w, data) + if err != nil { + panic(err) + } + w.Flush() +} + +func checkVersion(c *Context) bool { + found := false + if VersionFlag.Name != "" { + eachName(VersionFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found } func checkHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - ShowAppHelp(c) - return true + found := false + if HelpFlag.Name != "" { + eachName(HelpFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) } - - return false + return found } func checkCommandHelp(c *Context, name string) bool { diff --git a/vendor/github.com/codegangsta/cli/help_test.go b/vendor/github.com/codegangsta/cli/help_test.go new file mode 100644 index 00000000..350e2633 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/help_test.go @@ -0,0 +1,94 @@ +package cli + +import ( + "bytes" + "testing" +) + +func Test_ShowAppHelp_NoAuthor(t *testing.T) { + output := new(bytes.Buffer) + app := NewApp() + app.Writer = output + + c := NewContext(app, nil, nil) + + ShowAppHelp(c) + + if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 { + t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):") + } +} + +func Test_ShowAppHelp_NoVersion(t *testing.T) { + output := new(bytes.Buffer) + app := NewApp() + app.Writer = output + + app.Version = "" + + c := NewContext(app, nil, nil) + + ShowAppHelp(c) + + if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 { + t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:") + } +} + +func Test_Help_Custom_Flags(t *testing.T) { + oldFlag := HelpFlag + defer func() { + HelpFlag = oldFlag + }() + + HelpFlag = BoolFlag{ + Name: "help, x", + Usage: "show help", + } + + app := App{ + Flags: []Flag{ + BoolFlag{Name: "foo, h"}, + }, + Action: func(ctx *Context) { + if ctx.Bool("h") != true { + t.Errorf("custom help flag not set") + } + }, + } + output := new(bytes.Buffer) + app.Writer = output + app.Run([]string{"test", "-h"}) + if output.Len() > 0 { + t.Errorf("unexpected output: %s", output.String()) + } +} + +func Test_Version_Custom_Flags(t *testing.T) { + oldFlag := VersionFlag + defer func() { + VersionFlag = oldFlag + }() + + VersionFlag = BoolFlag{ + Name: "version, V", + Usage: "show version", + } + + app := App{ + Flags: []Flag{ + BoolFlag{Name: "foo, v"}, + }, + Action: func(ctx *Context) { + if ctx.Bool("v") != true { + t.Errorf("custom version flag not set") + } + }, + } + output := new(bytes.Buffer) + app.Writer = output + app.Run([]string{"test", "-v"}) + if output.Len() > 0 { + t.Errorf("unexpected output: %s", output.String()) + } +} diff --git a/vendor/github.com/codegangsta/cli/helpers_test.go b/vendor/github.com/codegangsta/cli/helpers_test.go index cdc4feb2..b1b7339f 100644 --- a/vendor/github.com/codegangsta/cli/helpers_test.go +++ b/vendor/github.com/codegangsta/cli/helpers_test.go @@ -1,4 +1,4 @@ -package cli_test +package cli import ( "reflect" @@ -7,13 +7,13 @@ import ( /* Test Helpers */ func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { + if !reflect.DeepEqual(a, b) { t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) } } func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { + if reflect.DeepEqual(a, b) { t.Errorf("Did not expect %v 
(type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) } } diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap index bcfe6635..2c0af060 100644 --- a/vendor/github.com/docker/distribution/.mailmap +++ b/vendor/github.com/docker/distribution/.mailmap @@ -3,4 +3,5 @@ Stephen J Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland Josh Hawn Josh Hawn -Richard Scothern Richard \ No newline at end of file +Richard Scothern Richard +Richard Scothern Richard Scothern diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS index 2b9e4c3e..5da5d1c6 100644 --- a/vendor/github.com/docker/distribution/AUTHORS +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -1,6 +1,8 @@ +Aaron Lehmann Adam Enger Adrian Mouat Ahmet Alp Balkan +Alex Chan Alex Elman Amy Lindburg Andrey Kostov @@ -8,47 +10,63 @@ Andy Goldstein Anton Tiurin Antonio Mercado Arnaud Porterie +Ayose Cazorla BadZen Ben Firshman bin liu Brian Bland burnettk +Chris Dillon Daisuke Fujita +Darren Shepherd Dave Trombley +Dave Tucker David Lawrence +David Verhasselt David Xia +davidli Derek McGowan Diogo Mónica Donald Huang Doug Davis +Florentin Raud Frederick F. Kautz IV Henri Gomez Hu Keping Ian Babrou Jeff Nickoloff Jessie Frazelle +Jianqing Wang +Jon Poler Jordan Liggitt Josh Hawn Julien Fernandez Kelsey Hightower Kenneth Lim +Li Yi +Luke Carpenter Mary Anthony +Matt Bentley Matt Robenolt Michael Prokop moxiegirl Nathan Sullivan +nevermosby Nghia Tran Oilbeater Olivier Gambier +Olivier Jacques +Patrick Devine Philip Misiowiec Richard Scothern -Richard Scothern Sebastiaan van Stijn Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn Spencer Rinehart Stephen J Day +Sylvain Baubeau +tgic Thomas Sjögren Tianon Gravi Tibor Vass diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md index b91a1d0f..1a9ecb74 100644 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -33,6 +33,7 @@ By following these simple rules you will get better and faster feedback on your - please refrain from adding "same thing here" or "+1" comments - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. ### If you have not found an existing issue that describes your problem: diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index 5555606f..7a4b3e9e 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -6,14 +6,14 @@ RUN apt-get update && \ ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH -ENV DOCKER_BUILDTAGS include_rados +ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs WORKDIR $DISTRIBUTION_DIR COPY . 
$DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml $DISTRIBUTION_DIR/cmd/registry/config.yml +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml RUN make PREFIX=/go clean binaries VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] -CMD ["cmd/registry/config.yml"] +CMD ["/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/Godeps/Godeps.json b/vendor/github.com/docker/distribution/Godeps/Godeps.json deleted file mode 100644 index 355596df..00000000 --- a/vendor/github.com/docker/distribution/Godeps/Godeps.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "ImportPath": "github.com/docker/distribution", - "GoVersion": "go1.4.2", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "github.com/AdRoll/goamz/aws", - "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" - }, - { - "ImportPath": "github.com/AdRoll/goamz/cloudfront", - "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" - }, - { - "ImportPath": "github.com/AdRoll/goamz/s3", - "Rev": "f8c4952d5bc3056c0ca6711a1f56bc88b828d989" - }, - { - "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc" - }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.7.3", - "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" - }, - { - "ImportPath": "github.com/bugsnag/bugsnag-go", - "Comment": "v1.0.2-5-gb1d1530", - "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" - }, - { - "ImportPath": "github.com/bugsnag/osext", - "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" - }, - { - "ImportPath": "github.com/bugsnag/panicwrap", - "Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27" - }, - { - "ImportPath": "github.com/codegangsta/cli", - "Comment": "1.2.0-66-g6086d79", - "Rev": "6086d7927ec35315964d9fea46df6c04e6d697c1" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" - }, - { - "ImportPath": "github.com/docker/docker/pkg/tarsum", - "Comment": "v1.4.1-3932-gb63ec6e", - "Rev": "b63ec6e4b1f6f5c77a6a74a52fcea9564538c575" - }, - { - "ImportPath": "github.com/docker/libtrust", - "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" - }, - { - "ImportPath": "github.com/garyburd/redigo/internal", - "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/garyburd/redigo/redis", - "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/gorilla/context", - "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" - }, - { - "ImportPath": "github.com/gorilla/handlers", - "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" - }, - { - "ImportPath": "github.com/gorilla/mux", - "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" - }, - { - "ImportPath": "github.com/noahdesu/go-ceph/rados", - "Comment": "v.0.3.0-29-gb15639c", - "Rev": "b15639c44c05368348355229070361395d9152ee" - }, - { - "ImportPath": "github.com/stevvooe/resumable", - "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" - }, - { - "ImportPath": "github.com/mitchellh/mapstructure", - "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" - }, - { - "ImportPath": "github.com/ncw/swift", - "Rev": "22c8fa9fb5ba145b4d4e2cebb027e84b1a7b1296" - }, - { - "ImportPath": "github.com/yvasiyarov/go-metrics", - "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" - }, - { - "ImportPath": "github.com/yvasiyarov/gorelic", - "Comment": "v0.0.6-8-ga9bba5b", - "Rev": 
"a9bba5b9ab508a086f9a12b8c51fab68478e2128" - }, - { - "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", - "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" - }, - { - "ImportPath": "golang.org/x/crypto/bcrypt", - "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" - }, - { - "ImportPath": "golang.org/x/crypto/blowfish", - "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "1dfe7915deaf3f80b962c163b918868d8a6d8974" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" - } - ] -} diff --git a/vendor/github.com/docker/distribution/Godeps/Readme b/vendor/github.com/docker/distribution/Godeps/Readme deleted file mode 100644 index 4cdaa53d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d684..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt.go deleted file mode 100644 index c0654f5d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "time" -) - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other goamz packages. -type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. - Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. 
-func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt_test.go deleted file mode 100644 index a6a0afc9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/attempt_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package aws_test - -import ( - "github.com/AdRoll/goamz/aws" - "gopkg.in/check.v1" - "time" -) - -func (S) TestAttemptTiming(c *check.C) { - testAttempt := aws.AttemptStrategy{ - Total: 0.25e9, - Delay: 0.1e9, - } - want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9} - got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing - t0 := time.Now() - for a := testAttempt.Start(); a.Next(); { - got = append(got, time.Now().Sub(t0)) - } - got = append(got, time.Now().Sub(t0)) - c.Assert(got, check.HasLen, len(want)) - const margin = 0.01e9 - for i, got := range want { - lo := want[i] - margin - hi := want[i] + margin - if got < lo || got > hi { - c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds()) - } - } -} - -func (S) TestAttemptNextHasNext(c *check.C) { - a := aws.AttemptStrategy{}.Start() - c.Assert(a.Next(), check.Equals, true) - c.Assert(a.Next(), check.Equals, false) - - a = aws.AttemptStrategy{}.Start() - c.Assert(a.Next(), check.Equals, true) - c.Assert(a.HasNext(), check.Equals, false) - c.Assert(a.Next(), check.Equals, false) - - a = aws.AttemptStrategy{Total: 2e8}.Start() - c.Assert(a.Next(), check.Equals, true) - c.Assert(a.HasNext(), check.Equals, true) - time.Sleep(2e8) - c.Assert(a.HasNext(), check.Equals, true) - c.Assert(a.Next(), check.Equals, true) - c.Assert(a.Next(), check.Equals, false) - - a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start() - time.Sleep(1e8) - c.Assert(a.Next(), check.Equals, true) - c.Assert(a.HasNext(), check.Equals, true) - c.Assert(a.Next(), check.Equals, true) - c.Assert(a.HasNext(), check.Equals, false) - c.Assert(a.Next(), check.Equals, false) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws.go deleted file mode 100644 index 87c2d6da..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws.go +++ /dev/null @@ -1,629 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. 
-// -// Written by Gustavo Niemeyer -// -package aws - -import ( - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "os/user" - "path" - "regexp" - "strings" - "time" -) - -// Regular expressions for INI files -var ( - iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`) - iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`) -) - -// Defines the valid signers -const ( - V2Signature = iota - V4Signature = iota - Route53Signature = iota -) - -// Defines the service endpoint and correct Signer implementation to use -// to sign requests for this endpoint -type ServiceInfo struct { - Endpoint string - Signer uint -} - -// Region defines the URLs where AWS services may be accessed. -// -// See http://goo.gl/d8BP1 for more details. -type Region struct { - Name string // the canonical name of this region. - EC2Endpoint string - S3Endpoint string - S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. - S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. - S3LowercaseBucket bool // true if the region requires bucket names to be lower case. - SDBEndpoint string - SNSEndpoint string - SQSEndpoint string - SESEndpoint string - IAMEndpoint string - ELBEndpoint string - KMSEndpoint string - DynamoDBEndpoint string - CloudWatchServicepoint ServiceInfo - AutoScalingEndpoint string - RDSEndpoint ServiceInfo - KinesisEndpoint string - STSEndpoint string - CloudFormationEndpoint string - ElastiCacheEndpoint string -} - -var Regions = map[string]Region{ - APNortheast.Name: APNortheast, - APSoutheast.Name: APSoutheast, - APSoutheast2.Name: APSoutheast2, - EUCentral.Name: EUCentral, - EUWest.Name: EUWest, - USEast.Name: USEast, - USWest.Name: USWest, - USWest2.Name: USWest2, - USGovWest.Name: USGovWest, - SAEast.Name: SAEast, - CNNorth1.Name: CNNorth1, -} - -// Designates a signer interface suitable for signing AWS requests, params -// should be appropriately encoded for the request before signing. -// -// A signer should be initialized with Auth and the appropriate endpoint. -type Signer interface { - Sign(method, path string, params map[string]string) -} - -// An AWS Service interface with the API to query the AWS service -// -// Supplied as an easy way to mock out service calls during testing. -type AWSService interface { - // Queries the AWS service at a given method/path with the params and - // returns an http.Response and error - Query(method, path string, params map[string]string) (*http.Response, error) - // Builds an error given an XML payload in the http.Response, can be used - // to process an error if the status code is not 200 for example. 
- BuildError(r *http.Response) error -} - -// Implements a Server Query/Post API to easily query AWS services and build -// errors when desired -type Service struct { - service ServiceInfo - signer Signer -} - -// Create a base set of params for an action -func MakeParams(action string) map[string]string { - params := make(map[string]string) - params["Action"] = action - return params -} - -// Create a new AWS server to handle making requests -func NewService(auth Auth, service ServiceInfo) (s *Service, err error) { - var signer Signer - switch service.Signer { - case V2Signature: - signer, err = NewV2Signer(auth, service) - // case V4Signature: - // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"]) - default: - err = fmt.Errorf("Unsupported signer for service") - } - if err != nil { - return - } - s = &Service{service: service, signer: signer} - return -} - -func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) { - params["Timestamp"] = time.Now().UTC().Format(time.RFC3339) - u, err := url.Parse(s.service.Endpoint) - if err != nil { - return nil, err - } - u.Path = path - - s.signer.Sign(method, path, params) - if method == "GET" { - u.RawQuery = multimap(params).Encode() - resp, err = http.Get(u.String()) - } else if method == "POST" { - resp, err = http.PostForm(u.String(), multimap(params)) - } - - return -} - -func (s *Service) BuildError(r *http.Response) error { - errors := ErrorResponse{} - xml.NewDecoder(r.Body).Decode(&errors) - var err Error - err = errors.Errors - err.RequestId = errors.RequestId - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - return &err -} - -type ServiceError interface { - error - ErrorCode() string -} - -type ErrorResponse struct { - Errors Error `xml:"Error"` - RequestId string // A unique ID for tracking the request -} - -type Error struct { - StatusCode int - Type string - Code string - Message string - RequestId string -} - -func (err *Error) Error() string { - return fmt.Sprintf("Type: %s, Code: %s, Message: %s", - err.Type, err.Code, err.Message, - ) -} - -func (err *Error) ErrorCode() string { - return err.Code -} - -type Auth struct { - AccessKey, SecretKey string - token string - expiration time.Time -} - -func (a *Auth) Token() string { - if a.token == "" { - return "" - } - if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock - auth, err := GetAuth("", "", "", time.Time{}) - if err == nil { - *a = auth - } - } - return a.token -} - -func (a *Auth) Expiration() time.Time { - return a.expiration -} - -// To be used with other APIs that return auth credentials such as STS -func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth { - return &Auth{ - AccessKey: accessKey, - SecretKey: secretKey, - token: token, - expiration: expiration, - } -} - -// ResponseMetadata -type ResponseMetadata struct { - RequestId string // A unique ID for tracking the request -} - -type BaseResponse struct { - ResponseMetadata ResponseMetadata -} - -var unreserved = make([]bool, 128) -var hex = "0123456789ABCDEF" - -func init() { - // RFC3986 - u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" - for _, c := range u { - unreserved[c] = true - } -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -type credentials struct { - Code string - LastUpdated 
string - Type string - AccessKeyId string - SecretAccessKey string - Token string - Expiration string -} - -// GetMetaData retrieves instance metadata about the current machine. -// -// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details. -func GetMetaData(path string) (contents []byte, err error) { - c := http.Client{ - Transport: &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - deadline := time.Now().Add(5 * time.Second) - c, err := net.DialTimeout(netw, addr, time.Second*2) - if err != nil { - return nil, err - } - c.SetDeadline(deadline) - return c, nil - }, - }, - } - - url := "http://169.254.169.254/latest/meta-data/" + path - - resp, err := c.Get(url) - if err != nil { - return - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) - return - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - return []byte(body), err -} - -func GetRegion(regionName string) (region Region) { - region = Regions[regionName] - return -} - -// GetInstanceCredentials creates an Auth based on the instance's role credentials. -// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned. -// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func GetInstanceCredentials() (cred credentials, err error) { - credentialPath := "iam/security-credentials/" - - // Get the instance role - role, err := GetMetaData(credentialPath) - if err != nil { - return - } - - // Get the instance role credentials - credentialJSON, err := GetMetaData(credentialPath + string(role)) - if err != nil { - return - } - - err = json.Unmarshal([]byte(credentialJSON), &cred) - return -} - -// GetAuth creates an Auth based on either passed in credentials, -// environment information or instance based role credentials. -func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) { - // First try passed in credentials - if accessKey != "" && secretKey != "" { - return Auth{accessKey, secretKey, token, expiration}, nil - } - - // Next try to get auth from the environment - auth, err = EnvAuth() - if err == nil { - // Found auth, return - return - } - - // Next try getting auth from the instance role - cred, err := GetInstanceCredentials() - if err == nil { - // Found auth, return - auth.AccessKey = cred.AccessKeyId - auth.SecretKey = cred.SecretAccessKey - auth.token = cred.Token - exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration) - if err != nil { - err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err) - } - auth.expiration = exptdate - return auth, err - } - - // Next try getting auth from the credentials file - auth, err = CredentialFileAuth("", "", time.Minute*5) - if err == nil { - return - } - - //err = errors.New("No valid AWS authentication found") - err = fmt.Errorf("No valid AWS authentication found: %s", err) - return auth, err -} - -// EnvAuth creates an Auth based on environment information. -// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment -// variables are used. 
-func EnvAuth() (auth Auth, err error) { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") - if auth.AccessKey == "" { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") - } - - auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") - if auth.SecretKey == "" { - auth.SecretKey = os.Getenv("AWS_SECRET_KEY") - } - if auth.AccessKey == "" { - err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") - } - if auth.SecretKey == "" { - err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") - } - return -} - -// CredentialFileAuth creates and Auth based on a credentials file. The file -// contains various authentication profiles for use with AWS. -// -// The credentials file, which is used by other AWS SDKs, is documented at -// http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs -func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) { - if profile == "" { - profile = "default" - } - - if filePath == "" { - u, err := user.Current() - if err != nil { - return auth, err - } - - filePath = path.Join(u.HomeDir, ".aws", "credentials") - } - - // read the file, then parse the INI - contents, err := ioutil.ReadFile(filePath) - if err != nil { - return - } - - profiles := parseINI(string(contents)) - profileData, ok := profiles[profile] - - if !ok { - err = errors.New("The credentials file did not contain the profile") - return - } - - keyId, ok := profileData["aws_access_key_id"] - if !ok { - err = errors.New("The credentials file did not contain required attribute aws_access_key_id") - return - } - - secretKey, ok := profileData["aws_secret_access_key"] - if !ok { - err = errors.New("The credentials file did not contain required attribute aws_secret_access_key") - return - } - - auth.AccessKey = keyId - auth.SecretKey = secretKey - - if token, ok := profileData["aws_session_token"]; ok { - auth.token = token - } - - auth.expiration = time.Now().Add(expiration) - - return -} - -// parseINI takes the contents of a credentials file and returns a map, whose keys -// are the various profiles, and whose values are maps of the settings for the -// profiles -func parseINI(fileContents string) map[string]map[string]string { - profiles := make(map[string]map[string]string) - - lines := strings.Split(fileContents, "\n") - - var currentSection map[string]string - for _, line := range lines { - // remove comments, which start with a semi-colon - if split := strings.Split(line, ";"); len(split) > 1 { - line = split[0] - } - - // check if the line is the start of a profile. - // - // for example: - // [default] - // - // otherwise, check for the proper setting - // property=value - if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 { - currentSection = make(map[string]string) - profiles[sectMatch[1]] = currentSection - } else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil { - currentSection[setMatch[1]] = setMatch[2] - } - } - - return profiles -} - -// Encode takes a string and URI-encodes it in a way suitable -// to be used in AWS signatures. 
-func Encode(s string) string { - encode := false - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - encode = true - break - } - } - if !encode { - return s - } - e := make([]byte, len(s)*3) - ei := 0 - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - e[ei] = '%' - e[ei+1] = hex[c>>4] - e[ei+2] = hex[c&0xF] - ei += 3 - } else { - e[ei] = c - ei += 1 - } - } - return string(e[:ei]) -} - -func dialTimeout(network, addr string) (net.Conn, error) { - return net.DialTimeout(network, addr, time.Duration(2*time.Second)) -} - -func AvailabilityZone() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone") - if err != nil { - return "unknown" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "unknown" - } else { - return string(body) - } - } -} - -func InstanceRegion() string { - az := AvailabilityZone() - if az == "unknown" { - return az - } else { - region := az[:len(az)-1] - return region - } -} - -func InstanceId() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id") - if err != nil { - return "unknown" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "unknown" - } else { - return string(body) - } - } -} - -func InstanceType() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type") - if err != nil { - return "unknown" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "unknown" - } else { - return string(body) - } - } -} - -func ServerLocalIp() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4") - if err != nil { - return "127.0.0.1" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "127.0.0.1" - } else { - return string(body) - } - } -} - -func ServerPublicIp() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4") - if err != nil { - return "127.0.0.1" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "127.0.0.1" - } else { - return string(body) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws_test.go deleted file mode 100644 index 0577f5c8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/aws_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package aws_test - -import ( - "github.com/AdRoll/goamz/aws" - "gopkg.in/check.v1" - "io/ioutil" - "os" - "strings" - "testing" - "time" -) - -func Test(t *testing.T) { - check.TestingT(t) -} - -var _ = check.Suite(&S{}) - -type S struct { - environ []string -} - -func (s *S) SetUpSuite(c *check.C) { - s.environ = 
os.Environ() -} - -func (s *S) TearDownTest(c *check.C) { - os.Clearenv() - for _, kv := range s.environ { - l := strings.SplitN(kv, "=", 2) - os.Setenv(l[0], l[1]) - } -} - -func (s *S) TestEnvAuthNoSecret(c *check.C) { - os.Clearenv() - _, err := aws.EnvAuth() - c.Assert(err, check.ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") -} - -func (s *S) TestEnvAuthNoAccess(c *check.C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "foo") - _, err := aws.EnvAuth() - c.Assert(err, check.ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") -} - -func (s *S) TestEnvAuth(c *check.C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY_ID", "access") - auth, err := aws.EnvAuth() - c.Assert(err, check.IsNil) - c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEnvAuthAlt(c *check.C) { - os.Clearenv() - os.Setenv("AWS_SECRET_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY", "access") - auth, err := aws.EnvAuth() - c.Assert(err, check.IsNil) - c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestGetAuthStatic(c *check.C) { - exptdate := time.Now().Add(time.Hour) - auth, err := aws.GetAuth("access", "secret", "token", exptdate) - c.Assert(err, check.IsNil) - c.Assert(auth.AccessKey, check.Equals, "access") - c.Assert(auth.SecretKey, check.Equals, "secret") - c.Assert(auth.Token(), check.Equals, "token") - c.Assert(auth.Expiration(), check.Equals, exptdate) -} - -func (s *S) TestGetAuthEnv(c *check.C) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_ACCESS_KEY_ID", "access") - auth, err := aws.GetAuth("", "", "", time.Time{}) - c.Assert(err, check.IsNil) - c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) -} - -func (s *S) TestEncode(c *check.C) { - c.Assert(aws.Encode("foo"), check.Equals, "foo") - c.Assert(aws.Encode("/"), check.Equals, "%2F") -} - -func (s *S) TestRegionsAreNamed(c *check.C) { - for n, r := range aws.Regions { - c.Assert(n, check.Equals, r.Name) - } -} - -func (s *S) TestCredentialsFileAuth(c *check.C) { - file, err := ioutil.TempFile("", "creds") - - if err != nil { - c.Fatal(err) - } - - iniFile := ` - -[default] ; comment 123 -aws_access_key_id = keyid1 ;comment -aws_secret_access_key=key1 - - [profile2] - aws_access_key_id = keyid2 ;comment - aws_secret_access_key=key2 - aws_session_token=token1 - -` - _, err = file.WriteString(iniFile) - if err != nil { - c.Fatal(err) - } - - err = file.Close() - if err != nil { - c.Fatal(err) - } - - // check non-existant profile - _, err = aws.CredentialFileAuth(file.Name(), "no profile", 30*time.Minute) - c.Assert(err, check.Not(check.Equals), nil) - - defaultProfile, err := aws.CredentialFileAuth(file.Name(), "default", 30*time.Minute) - c.Assert(err, check.Equals, nil) - c.Assert(defaultProfile.AccessKey, check.Equals, "keyid1") - c.Assert(defaultProfile.SecretKey, check.Equals, "key1") - c.Assert(defaultProfile.Token(), check.Equals, "") - - profile2, err := aws.CredentialFileAuth(file.Name(), "profile2", 30*time.Minute) - c.Assert(err, check.Equals, nil) - c.Assert(profile2.AccessKey, check.Equals, "keyid2") - c.Assert(profile2.SecretKey, check.Equals, "key2") - c.Assert(profile2.Token(), check.Equals, "token1") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/client.go 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/client.go deleted file mode 100644 index 86d2ccec..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/client.go +++ /dev/null @@ -1,124 +0,0 @@ -package aws - -import ( - "math" - "net" - "net/http" - "time" -) - -type RetryableFunc func(*http.Request, *http.Response, error) bool -type WaitFunc func(try int) -type DeadlineFunc func() time.Time - -type ResilientTransport struct { - // Timeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // The default is no timeout. - // - // With or without a timeout, the operating system may impose - // its own earlier timeout. For instance, TCP timeouts are - // often around 3 minutes. - DialTimeout time.Duration - - // MaxTries, if non-zero, specifies the number of times we will retry on - // failure. Retries are only attempted for temporary network errors or known - // safe failures. - MaxTries int - Deadline DeadlineFunc - ShouldRetry RetryableFunc - Wait WaitFunc - transport *http.Transport -} - -// Convenience method for creating an http client -func NewClient(rt *ResilientTransport) *http.Client { - rt.transport = &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - c, err := net.DialTimeout(netw, addr, rt.DialTimeout) - if err != nil { - return nil, err - } - c.SetDeadline(rt.Deadline()) - return c, nil - }, - Proxy: http.ProxyFromEnvironment, - } - // TODO: Would be nice if ResilientTransport allowed clients to initialize - // with http.Transport attributes. - return &http.Client{ - Transport: rt, - } -} - -var retryingTransport = &ResilientTransport{ - Deadline: func() time.Time { - return time.Now().Add(5 * time.Second) - }, - DialTimeout: 10 * time.Second, - MaxTries: 3, - ShouldRetry: awsRetry, - Wait: ExpBackoff, -} - -// Exported default client -var RetryingClient = NewClient(retryingTransport) - -func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.tries(req) -} - -// Retry a request a maximum of t.MaxTries times. -// We'll only retry if the proper criteria are met. -// If a wait function is specified, wait that amount of time -// in between requests. -func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { - for try := 0; try < t.MaxTries; try += 1 { - res, err = t.transport.RoundTrip(req) - - if !t.ShouldRetry(req, res, err) { - break - } - if res != nil { - res.Body.Close() - } - if t.Wait != nil { - t.Wait(try) - } - } - - return -} - -func ExpBackoff(try int) { - time.Sleep(100 * time.Millisecond * - time.Duration(math.Exp2(float64(try)))) -} - -func LinearBackoff(try int) { - time.Sleep(time.Duration(try*100) * time.Millisecond) -} - -// Decide if we should retry a request. -// In general, the criteria for retrying a request are described here -// http://docs.aws.amazon.com/general/latest/gr/api-retries.html -func awsRetry(req *http.Request, res *http.Response, err error) bool { - retry := false - - // Retry if there's a temporary network error. - if neterr, ok := err.(net.Error); ok { - if neterr.Temporary() { - retry = true - } - } - - // Retry if we get a 5xx series error.
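The tries loop above is the entire retry strategy: issue the request, ask ShouldRetry whether the failure is worth another attempt, close the failed body, wait, and go again. A minimal self-contained sketch of the same recipe outside the goamz types (shouldRetry and getWithRetries are illustrative names, not part of this API; the retry criteria and the 100ms-doubling schedule mirror awsRetry and ExpBackoff above):

    package main

    import (
        "fmt"
        "math"
        "net"
        "net/http"
        "time"
    )

    // shouldRetry mirrors the awsRetry criteria above: retry on temporary
    // network errors and on 5xx responses, nothing else.
    func shouldRetry(res *http.Response, err error) bool {
        if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
            return true
        }
        return res != nil && res.StatusCode >= 500 && res.StatusCode < 600
    }

    // getWithRetries issues a GET and retries up to maxTries times, sleeping
    // 100ms * 2^try between attempts, the same schedule as ExpBackoff.
    func getWithRetries(url string, maxTries int) (*http.Response, error) {
        var res *http.Response
        var err error
        for try := 0; try < maxTries; try++ {
            res, err = http.Get(url)
            if !shouldRetry(res, err) {
                break
            }
            if res != nil {
                res.Body.Close() // discard the failed response before retrying, as tries does
            }
            time.Sleep(100 * time.Millisecond * time.Duration(math.Exp2(float64(try))))
        }
        return res, err
    }

    func main() {
        res, err := getWithRetries("http://example.com/", 3)
        if err != nil {
            fmt.Println("giving up:", err)
            return
        }
        defer res.Body.Close()
        fmt.Println(res.Status)
    }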
- if res != nil { - if res.StatusCode >= 500 && res.StatusCode < 600 { - retry = true - } - } - - return retry -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/export_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/export_test.go deleted file mode 100644 index 5f4a9dd0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/export_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package aws - -import ( - "net/http" - "time" -) - -// V4Signer: -// Exporting methods for testing - -func (s *V4Signer) RequestTime(req *http.Request) time.Time { - return s.requestTime(req) -} - -func (s *V4Signer) CanonicalRequest(req *http.Request) string { - return s.canonicalRequest(req, "") -} - -func (s *V4Signer) StringToSign(t time.Time, creq string) string { - return s.stringToSign(t, creq) -} - -func (s *V4Signer) Signature(t time.Time, sts string) string { - return s.signature(t, sts) -} - -func (s *V4Signer) Authorization(header http.Header, t time.Time, signature string) string { - return s.authorization(header, t, signature) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/regions.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/regions.go deleted file mode 100644 index fdc2626b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/regions.go +++ /dev/null @@ -1,265 +0,0 @@ -package aws - -var USGovWest = Region{ - "us-gov-west-1", - "https://ec2.us-gov-west-1.amazonaws.com", - "https://s3-fips-us-gov-west-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.us-gov-west-1.amazonaws.com", - "https://sqs.us-gov-west-1.amazonaws.com", - "", - "https://iam.us-gov.amazonaws.com", - "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", - "", - "https://dynamodb.us-gov-west-1.amazonaws.com", - ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature}, - "https://autoscaling.us-gov-west-1.amazonaws.com", - ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature}, - "", - "https://sts.amazonaws.com", - "https://cloudformation.us-gov-west-1.amazonaws.com", - "", -} - -var USEast = Region{ - "us-east-1", - "https://ec2.us-east-1.amazonaws.com", - "https://s3.amazonaws.com", - "", - false, - false, - "https://sdb.amazonaws.com", - "https://sns.us-east-1.amazonaws.com", - "https://sqs.us-east-1.amazonaws.com", - "https://email.us-east-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-east-1.amazonaws.com", - "https://kms.us-east-1.amazonaws.com", - "https://dynamodb.us-east-1.amazonaws.com", - ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature}, - "https://autoscaling.us-east-1.amazonaws.com", - ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature}, - "https://kinesis.us-east-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.us-east-1.amazonaws.com", - "https://elasticache.us-east-1.amazonaws.com", -} - -var USWest = Region{ - "us-west-1", - "https://ec2.us-west-1.amazonaws.com", - "https://s3-us-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-1.amazonaws.com", - "https://sns.us-west-1.amazonaws.com", - "https://sqs.us-west-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-1.amazonaws.com", - "https://kms.us-west-1.amazonaws.com", - 
"https://dynamodb.us-west-1.amazonaws.com", - ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature}, - "https://autoscaling.us-west-1.amazonaws.com", - ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature}, - "https://kinesis.us-west-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.us-west-1.amazonaws.com", - "https://elasticache.us-west-1.amazonaws.com", -} - -var USWest2 = Region{ - "us-west-2", - "https://ec2.us-west-2.amazonaws.com", - "https://s3-us-west-2.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-2.amazonaws.com", - "https://sns.us-west-2.amazonaws.com", - "https://sqs.us-west-2.amazonaws.com", - "https://email.us-west-2.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-2.amazonaws.com", - "https://kms.us-west-2.amazonaws.com", - "https://dynamodb.us-west-2.amazonaws.com", - ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature}, - "https://autoscaling.us-west-2.amazonaws.com", - ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature}, - "https://kinesis.us-west-2.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.us-west-2.amazonaws.com", - "https://elasticache.us-west-2.amazonaws.com", -} - -var EUWest = Region{ - "eu-west-1", - "https://ec2.eu-west-1.amazonaws.com", - "https://s3-eu-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.eu-west-1.amazonaws.com", - "https://sns.eu-west-1.amazonaws.com", - "https://sqs.eu-west-1.amazonaws.com", - "https://email.eu-west-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-west-1.amazonaws.com", - "https://kms.eu-west-1.amazonaws.com", - "https://dynamodb.eu-west-1.amazonaws.com", - ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature}, - "https://autoscaling.eu-west-1.amazonaws.com", - ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature}, - "https://kinesis.eu-west-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.eu-west-1.amazonaws.com", - "https://elasticache.eu-west-1.amazonaws.com", -} - -var EUCentral = Region{ - "eu-central-1", - "https://ec2.eu-central-1.amazonaws.com", - "https://s3-eu-central-1.amazonaws.com", - "", - true, - true, - "https://sdb.eu-central-1.amazonaws.com", - "https://sns.eu-central-1.amazonaws.com", - "https://sqs.eu-central-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-central-1.amazonaws.com", - "https://kms.eu-central-1.amazonaws.com", - "https://dynamodb.eu-central-1.amazonaws.com", - ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature}, - "https://autoscaling.eu-central-1.amazonaws.com", - ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature}, - "https://kinesis.eu-central-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.eu-central-1.amazonaws.com", - "", -} - -var APSoutheast = Region{ - "ap-southeast-1", - "https://ec2.ap-southeast-1.amazonaws.com", - "https://s3-ap-southeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-1.amazonaws.com", - "https://sns.ap-southeast-1.amazonaws.com", - "https://sqs.ap-southeast-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", - "https://kms.ap-southeast-1.amazonaws.com", - "https://dynamodb.ap-southeast-1.amazonaws.com", - ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature}, - 
"https://autoscaling.ap-southeast-1.amazonaws.com", - ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature}, - "https://kinesis.ap-southeast-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.ap-southeast-1.amazonaws.com", - "https://elasticache.ap-southeast-1.amazonaws.com", -} - -var APSoutheast2 = Region{ - "ap-southeast-2", - "https://ec2.ap-southeast-2.amazonaws.com", - "https://s3-ap-southeast-2.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-2.amazonaws.com", - "https://sns.ap-southeast-2.amazonaws.com", - "https://sqs.ap-southeast-2.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-southeast-2.amazonaws.com", - "https://kms.ap-southeast-2.amazonaws.com", - "https://dynamodb.ap-southeast-2.amazonaws.com", - ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature}, - "https://autoscaling.ap-southeast-2.amazonaws.com", - ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature}, - "https://kinesis.ap-southeast-2.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.ap-southeast-2.amazonaws.com", - "https://elasticache.ap-southeast-2.amazonaws.com", -} - -var APNortheast = Region{ - "ap-northeast-1", - "https://ec2.ap-northeast-1.amazonaws.com", - "https://s3-ap-northeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-northeast-1.amazonaws.com", - "https://sns.ap-northeast-1.amazonaws.com", - "https://sqs.ap-northeast-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", - "https://kms.ap-northeast-1.amazonaws.com", - "https://dynamodb.ap-northeast-1.amazonaws.com", - ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature}, - "https://autoscaling.ap-northeast-1.amazonaws.com", - ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature}, - "https://kinesis.ap-northeast-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.ap-northeast-1.amazonaws.com", - "https://elasticache.ap-northeast-1.amazonaws.com", -} - -var SAEast = Region{ - "sa-east-1", - "https://ec2.sa-east-1.amazonaws.com", - "https://s3-sa-east-1.amazonaws.com", - "", - true, - true, - "https://sdb.sa-east-1.amazonaws.com", - "https://sns.sa-east-1.amazonaws.com", - "https://sqs.sa-east-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.sa-east-1.amazonaws.com", - "https://kms.sa-east-1.amazonaws.com", - "https://dynamodb.sa-east-1.amazonaws.com", - ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature}, - "https://autoscaling.sa-east-1.amazonaws.com", - ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature}, - "", - "https://sts.amazonaws.com", - "https://cloudformation.sa-east-1.amazonaws.com", - "https://elasticache.sa-east-1.amazonaws.com", -} - -var CNNorth1 = Region{ - "cn-north-1", - "https://ec2.cn-north-1.amazonaws.com.cn", - "https://s3.cn-north-1.amazonaws.com.cn", - "", - true, - true, - "", - "https://sns.cn-north-1.amazonaws.com.cn", - "https://sqs.cn-north-1.amazonaws.com.cn", - "", - "https://iam.cn-north-1.amazonaws.com.cn", - "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", - "", - "https://dynamodb.cn-north-1.amazonaws.com.cn", - ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature}, - "https://autoscaling.cn-north-1.amazonaws.com.cn", - ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature}, - "", - 
"https://sts.cn-north-1.amazonaws.com.cn", - "", - "", -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry.go deleted file mode 100644 index bea964b9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry.go +++ /dev/null @@ -1,136 +0,0 @@ -package aws - -import ( - "math/rand" - "net" - "net/http" - "time" -) - -const ( - maxDelay = 20 * time.Second - defaultScale = 300 * time.Millisecond - throttlingScale = 500 * time.Millisecond - throttlingScaleRange = throttlingScale / 4 - defaultMaxRetries = 3 - dynamoDBScale = 25 * time.Millisecond - dynamoDBMaxRetries = 10 -) - -// A RetryPolicy encapsulates a strategy for implementing client retries. -// -// Default implementations are provided which match the AWS SDKs. -type RetryPolicy interface { - // ShouldRetry returns whether a client should retry a failed request. - ShouldRetry(target string, r *http.Response, err error, numRetries int) bool - - // Delay returns the time a client should wait before issuing a retry. - Delay(target string, r *http.Response, err error, numRetries int) time.Duration -} - -// DefaultRetryPolicy implements the AWS SDK default retry policy. -// -// It will retry up to 3 times, and uses an exponential backoff with a scale -// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of -// throttling, the delay will also include some randomness. -// -// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90. -type DefaultRetryPolicy struct { -} - -// ShouldRetry implements the RetryPolicy ShouldRetry method. -func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { - return shouldRetry(r, err, numRetries, defaultMaxRetries) -} - -// Delay implements the RetryPolicy Delay method. -func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { - scale := defaultScale - if err, ok := err.(*Error); ok && isThrottlingException(err) { - scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange))) - } - return exponentialBackoff(numRetries, scale) -} - -// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy. -// -// It will retry up to 10 times, and uses an exponential backoff with a scale -// factor of 25ms (25ms, 50ms, 100ms, ...). -// -// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103. -type DynamoDBRetryPolicy struct { -} - -// ShouldRetry implements the RetryPolicy ShouldRetry method. -func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { - return shouldRetry(r, err, numRetries, dynamoDBMaxRetries) -} - -// Delay implements the RetryPolicy Delay method. -func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { - return exponentialBackoff(numRetries, dynamoDBScale) -} - -// NeverRetryPolicy never retries requests and returns immediately on failure. -type NeverRetryPolicy struct { -} - -// ShouldRetry implements the RetryPolicy ShouldRetry method. 
-func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { - return false -} - -// Delay implements the RetryPolicy Delay method. -func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { - return time.Duration(0) -} - -// shouldRetry determines if we should retry the request. -// -// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html. -func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool { - // Once we've exceeded the max retry attempts, game over. - if numRetries >= maxRetries { - return false - } - - // Always retry temporary network errors. - if err, ok := err.(net.Error); ok && err.Temporary() { - return true - } - - // Always retry 5xx responses. - if r != nil && r.StatusCode >= 500 { - return true - } - - // Always retry throttling exceptions. - if err, ok := err.(ServiceError); ok && isThrottlingException(err) { - return true - } - - // Other classes of failures indicate a problem with the request. Retrying - // won't help. - return false -} - -func exponentialBackoff(numRetries int, scale time.Duration) time.Duration { - if numRetries < 0 { - return time.Duration(0) - } - - delay := (1 << uint(numRetries)) * scale - if delay > maxDelay { - return maxDelay - } - return delay -} - -func isThrottlingException(err ServiceError) bool { - switch err.ErrorCode() { - case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException": - return true - default: - return false - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry_test.go deleted file mode 100644 index c1f10be4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/retry_test.go +++ /dev/null @@ -1,303 +0,0 @@ -package aws - -import ( - "math/rand" - "net" - "net/http" - "testing" - "time" -) - -type testInput struct { - res *http.Response - err error - numRetries int -} - -type testResult struct { - shouldRetry bool - delay time.Duration -} - -type testCase struct { - input testInput - defaultResult testResult - dynamoDBResult testResult -} - -var testCases = []testCase{ - // Test nil fields - testCase{ - input: testInput{ - err: nil, - res: nil, - numRetries: 0, - }, - defaultResult: testResult{ - shouldRetry: false, - delay: 300 * time.Millisecond, - }, - dynamoDBResult: testResult{ - shouldRetry: false, - delay: 25 * time.Millisecond, - }, - }, - // Test 3 different throttling exceptions - testCase{ - input: testInput{ - err: &Error{ - Code: "Throttling", - }, - numRetries: 0, - }, - defaultResult: testResult{ - shouldRetry: true, - delay: 617165505 * time.Nanosecond, // account for randomness with known seed - }, - dynamoDBResult: testResult{ - shouldRetry: true, - delay: 25 * time.Millisecond, - }, - }, - testCase{ - input: testInput{ - err: &Error{ - Code: "ThrottlingException", - }, - numRetries: 0, - }, - defaultResult: testResult{ - shouldRetry: true, - delay: 579393152 * time.Nanosecond, // account for randomness with known seed - }, - dynamoDBResult: testResult{ - shouldRetry: true, - delay: 25 * time.Millisecond, - }, - }, - testCase{ - input: testInput{ - err: &Error{ - Code: "ProvisionedThroughputExceededException", - }, - numRetries: 1, - }, - defaultResult: testResult{ - shouldRetry: true, - delay: 1105991654 * time.Nanosecond, // 
account for randomness with known seed - }, - dynamoDBResult: testResult{ - shouldRetry: true, - delay: 50 * time.Millisecond, - }, - }, - // Test a fake throttling exception - testCase{ - input: testInput{ - err: &Error{ - Code: "MyMadeUpThrottlingCode", - }, - numRetries: 0, - }, - defaultResult: testResult{ - shouldRetry: false, - delay: 300 * time.Millisecond, - }, - dynamoDBResult: testResult{ - shouldRetry: false, - delay: 25 * time.Millisecond, - }, - }, - // Test 5xx errors - testCase{ - input: testInput{ - res: &http.Response{ - StatusCode: http.StatusInternalServerError, - }, - numRetries: 1, - }, - defaultResult: testResult{ - shouldRetry: true, - delay: 600 * time.Millisecond, - }, - dynamoDBResult: testResult{ - shouldRetry: true, - delay: 50 * time.Millisecond, - }, - }, - testCase{ - input: testInput{ - res: &http.Response{ - StatusCode: http.StatusServiceUnavailable, - }, - numRetries: 1, - }, - defaultResult: testResult{ - shouldRetry: true, - delay: 600 * time.Millisecond, - }, - dynamoDBResult: testResult{ - shouldRetry: true, - delay: 50 * time.Millisecond, - }, - }, - // Test a random 400 error - testCase{ - input: testInput{ - res: &http.Response{ - StatusCode: http.StatusNotFound, - }, - numRetries: 1, - }, - defaultResult: testResult{ - shouldRetry: false, - delay: 600 * time.Millisecond, - }, - dynamoDBResult: testResult{ - shouldRetry: false, - delay: 50 * time.Millisecond, - }, - }, - // Test a temporary net.Error - testCase{ - input: testInput{ - res: &http.Response{}, - err: &net.DNSError{ - IsTimeout: true, - }, - numRetries: 2, - }, - defaultResult: testResult{ - shouldRetry: true, - delay: 1200 * time.Millisecond, - }, - dynamoDBResult: testResult{ - shouldRetry: true, - delay: 100 * time.Millisecond, - }, - }, - // Test a non-temporary net.Error - testCase{ - input: testInput{ - res: &http.Response{}, - err: &net.DNSError{ - IsTimeout: false, - }, - numRetries: 3, - }, - defaultResult: testResult{ - shouldRetry: false, - delay: 2400 * time.Millisecond, - }, - dynamoDBResult: testResult{ - shouldRetry: false, - delay: 200 * time.Millisecond, - }, - }, - // Assert failure after hitting max default retries - testCase{ - input: testInput{ - err: &Error{ - Code: "ProvisionedThroughputExceededException", - }, - numRetries: defaultMaxRetries, - }, - defaultResult: testResult{ - shouldRetry: false, - delay: 4313582352 * time.Nanosecond, // account for randomness with known seed - }, - dynamoDBResult: testResult{ - shouldRetry: true, - delay: 200 * time.Millisecond, - }, - }, - // Assert failure after hitting max DynamoDB retries - testCase{ - input: testInput{ - err: &Error{ - Code: "ProvisionedThroughputExceededException", - }, - numRetries: dynamoDBMaxRetries, - }, - defaultResult: testResult{ - shouldRetry: false, - delay: maxDelay, - }, - dynamoDBResult: testResult{ - shouldRetry: false, - delay: maxDelay, - }, - }, - // Assert we never go over the maxDelay value - testCase{ - input: testInput{ - numRetries: 25, - }, - defaultResult: testResult{ - shouldRetry: false, - delay: maxDelay, - }, - dynamoDBResult: testResult{ - shouldRetry: false, - delay: maxDelay, - }, - }, -} - -func TestDefaultRetryPolicy(t *testing.T) { - rand.Seed(0) - var policy RetryPolicy - policy = &DefaultRetryPolicy{} - for _, test := range testCases { - res := test.input.res - err := test.input.err - numRetries := test.input.numRetries - - shouldRetry := policy.ShouldRetry("", res, err, numRetries) - if shouldRetry != test.defaultResult.shouldRetry { - t.Errorf("ShouldRetry returned %v, 
expected %v res=%#v err=%#v numRetries=%d", shouldRetry, test.defaultResult.shouldRetry, res, err, numRetries) - } - delay := policy.Delay("", res, err, numRetries) - if delay != test.defaultResult.delay { - t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, test.defaultResult.delay, res, err, numRetries) - } - } -} - -func TestDynamoDBRetryPolicy(t *testing.T) { - var policy RetryPolicy - policy = &DynamoDBRetryPolicy{} - for _, test := range testCases { - res := test.input.res - err := test.input.err - numRetries := test.input.numRetries - - shouldRetry := policy.ShouldRetry("", res, err, numRetries) - if shouldRetry != test.dynamoDBResult.shouldRetry { - t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, test.dynamoDBResult.shouldRetry, res, err, numRetries) - } - delay := policy.Delay("", res, err, numRetries) - if delay != test.dynamoDBResult.delay { - t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, test.dynamoDBResult.delay, res, err, numRetries) - } - } -} - -func TestNeverRetryPolicy(t *testing.T) { - var policy RetryPolicy - policy = &NeverRetryPolicy{} - for _, test := range testCases { - res := test.input.res - err := test.input.err - numRetries := test.input.numRetries - - shouldRetry := policy.ShouldRetry("", res, err, numRetries) - if shouldRetry { - t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, false, res, err, numRetries) - } - delay := policy.Delay("", res, err, numRetries) - if delay != time.Duration(0) { - t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, time.Duration(0), res, err, numRetries) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign.go deleted file mode 100644 index 5875beee..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign.go +++ /dev/null @@ -1,402 +0,0 @@ -package aws - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "path" - "sort" - "strings" - "time" -) - -type V2Signer struct { - auth Auth - service ServiceInfo - host string -} - -var b64 = base64.StdEncoding - -func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) { - u, err := url.Parse(service.Endpoint) - if err != nil { - return nil, err - } - return &V2Signer{auth: auth, service: service, host: u.Host}, nil -} - -func (s *V2Signer) Sign(method, path string, params map[string]string) { - params["AWSAccessKeyId"] = s.auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if s.auth.Token() != "" { - params["SecurityToken"] = s.auth.Token() - } - - // AWS specifies that the parameters in a signed request must - // be provided in the natural order of the keys. This is distinct - // from the natural order of the encoded value of key=value. - // Percent and equals signs affect the sorting order.
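The comment above is the core of Version 2 signing: sort the parameters by key, percent-encode each key and value, join them with the method, host and path into one canonical string, and HMAC-SHA256 it with the secret key, base64-encoding the result. A rough standalone sketch of those steps (signV2 is an illustrative name; note that url.QueryEscape encodes spaces as '+', which AWS rejects, and that is exactly why this package carries its own Encode):

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "encoding/base64"
        "fmt"
        "net/url"
        "sort"
        "strings"
    )

    // signV2 builds the V2 string to sign and HMACs it:
    // METHOD \n host \n path \n sorted, percent-encoded query string.
    func signV2(method, host, path, secret string, params map[string]string) string {
        keys := make([]string, 0, len(params))
        for k := range params {
            keys = append(keys, k)
        }
        sort.Strings(keys) // natural order of the keys, as AWS requires

        pairs := make([]string, 0, len(keys))
        for _, k := range keys {
            // Caveat: url.QueryEscape emits '+' for a space; the deleted
            // code uses its own Encode to emit %20 instead.
            pairs = append(pairs, url.QueryEscape(k)+"="+url.QueryEscape(params[k]))
        }
        payload := method + "\n" + host + "\n" + path + "\n" + strings.Join(pairs, "&")

        mac := hmac.New(sha256.New, []byte(secret))
        mac.Write([]byte(payload))
        return base64.StdEncoding.EncodeToString(mac.Sum(nil))
    }

    func main() {
        params := map[string]string{
            "Action":           "DescribeInstances",
            "SignatureVersion": "2",
            "SignatureMethod":  "HmacSHA256",
        }
        fmt.Println(signV2("GET", "ec2.us-east-1.amazonaws.com", "/", "SECRET", params))
    }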
- var keys, sarray []string - for k, _ := range params { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - sarray = append(sarray, Encode(k)+"="+Encode(params[k])) - } - joined := strings.Join(sarray, "&") - payload := method + "\n" + s.host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} - -// Common date formats for signing requests -const ( - ISO8601BasicFormat = "20060102T150405Z" - ISO8601BasicFormatShort = "20060102" -) - -type Route53Signer struct { - auth Auth -} - -func NewRoute53Signer(auth Auth) *Route53Signer { - return &Route53Signer{auth: auth} -} - -// Creates the authorization signature based on the date stamp and secret key -func (s *Route53Signer) getHeaderAuthorize(message string) string { - hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hmacSha256.Write([]byte(message)) - cryptedString := hmacSha256.Sum(nil) - - return base64.StdEncoding.EncodeToString(cryptedString) -} - -// Adds all the required headers for AWS Route53 API to the request -// including the authorization -func (s *Route53Signer) Sign(req *http.Request) { - date := time.Now().UTC().Format(time.RFC1123) - delete(req.Header, "Date") - req.Header.Set("Date", date) - - authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s", - s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date)) - - req.Header.Set("Host", req.Host) - req.Header.Set("X-Amzn-Authorization", authHeader) - req.Header.Set("Content-Type", "application/xml") - if s.auth.Token() != "" { - req.Header.Set("X-Amz-Security-Token", s.auth.Token()) - } -} - -/* -The V4Signer encapsulates all of the functionality to sign a request with the AWS -Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) -*/ -type V4Signer struct { - auth Auth - serviceName string - region Region - // Add the x-amz-content-sha256 header - IncludeXAmzContentSha256 bool -} - -/* -Return a new instance of a V4Signer capable of signing AWS requests. -*/ -func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer { - return &V4Signer{ - auth: auth, - serviceName: serviceName, - region: region, - IncludeXAmzContentSha256: false, - } -} - -/* -Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) - -The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date" -or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires -the "host" header to be a signed header, therefore the Sign method will manually set a "host" header from -the request.Host. - -The signed request will include a new "Authorization" header indicating that the request has been signed. - -Any changes to the request after signing the request will invalidate the signature.
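The process itself reduces to four steps: canonicalize the request, hash that into a string to sign, derive a signing key from the secret key, date, region and service name, and HMAC the two together. A sketch of the last two steps (hmacSHA256 and deriveKey are illustrative names), which can be checked against the get-vanilla vectors from the test suite further down:

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "fmt"
    )

    func hmacSHA256(key, data []byte) []byte {
        h := hmac.New(sha256.New, key)
        h.Write(data)
        return h.Sum(nil)
    }

    // deriveKey walks the V4 key-derivation chain:
    // HMAC("AWS4"+secret, date) -> region -> service -> "aws4_request".
    func deriveKey(secret, date, region, service string) []byte {
        k := hmacSHA256([]byte("AWS4"+secret), []byte(date))
        k = hmacSHA256(k, []byte(region))
        k = hmacSHA256(k, []byte(service))
        return hmacSHA256(k, []byte("aws4_request"))
    }

    func main() {
        // Credentials and string to sign from the "get-vanilla" case of the
        // Signature Version 4 Test Suite used in sign_test.go below.
        key := deriveKey("wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
            "20110909", "us-east-1", "host")
        sts := "AWS4-HMAC-SHA256\n20110909T233600Z\n" +
            "20110909/us-east-1/host/aws4_request\n" +
            "366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1"
        // Should print b27ccfbf..., the signature asserted by that test case.
        fmt.Printf("%x\n", hmacSHA256(key, []byte(sts)))
    }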
-*/ -func (s *V4Signer) Sign(req *http.Request) { - req.Header.Set("host", req.Host) // host header must be included as a signed header - t := s.requestTime(req) // Get request time - - payloadHash := "" - - if _, ok := req.Form["X-Amz-Expires"]; ok { - // We are authenticating the request by using query params - // (also known as pre-signing a url, http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) - payloadHash = "UNSIGNED-PAYLOAD" - req.Header.Del("x-amz-date") - - req.Form["X-Amz-SignedHeaders"] = []string{s.signedHeaders(req.Header)} - req.Form["X-Amz-Algorithm"] = []string{"AWS4-HMAC-SHA256"} - req.Form["X-Amz-Credential"] = []string{s.auth.AccessKey + "/" + s.credentialScope(t)} - req.Form["X-Amz-Date"] = []string{t.Format(ISO8601BasicFormat)} - req.URL.RawQuery = req.Form.Encode() - } else { - payloadHash = s.payloadHash(req) - if s.IncludeXAmzContentSha256 { - req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash - } - } - creq := s.canonicalRequest(req, payloadHash) // Build canonical request - sts := s.stringToSign(t, creq) // Build string to sign - signature := s.signature(t, sts) // Calculate the AWS Signature Version 4 - auth := s.authorization(req.Header, t, signature) // Create Authorization header value - - if _, ok := req.Form["X-Amz-Expires"]; ok { - req.Form["X-Amz-Signature"] = []string{signature} - } else { - req.Header.Set("Authorization", auth) // Add Authorization header to request - } - return -} - -/* -requestTime method will parse the time from the request "x-amz-date" or "date" headers. -If the "x-amz-date" header is present, that will take priority over the "date" header. -If neither header is defined or we are unable to parse either header as a valid date -then we will create a new "x-amz-date" header with the current time. -*/ -func (s *V4Signer) requestTime(req *http.Request) time.Time { - - // Get "x-amz-date" header - date := req.Header.Get("x-amz-date") - - // Attempt to parse as ISO8601BasicFormat - t, err := time.Parse(ISO8601BasicFormat, date) - if err == nil { - return t - } - - // Attempt to parse as http.TimeFormat - t, err = time.Parse(http.TimeFormat, date) - if err == nil { - req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) - return t - } - - // Get "date" header - date = req.Header.Get("date") - - // Attempt to parse as http.TimeFormat - t, err = time.Parse(http.TimeFormat, date) - if err == nil { - return t - } - - // Create a current time header to be used - t = time.Now().UTC() - req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) - return t -} - -/* -canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process.
(http://goo.gl/eUUZ3S) - - CanonicalRequest = - HTTPRequestMethod + '\n' + - CanonicalURI + '\n' + - CanonicalQueryString + '\n' + - CanonicalHeaders + '\n' + - SignedHeaders + '\n' + - HexEncode(Hash(Payload)) - -payloadHash is optional; use the empty string and it will be calculated from the request -*/ -func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string { - if payloadHash == "" { - payloadHash = s.payloadHash(req) - } - c := new(bytes.Buffer) - fmt.Fprintf(c, "%s\n", req.Method) - fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL)) - fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL)) - fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header)) - fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header)) - fmt.Fprintf(c, "%s", payloadHash) - return c.String() -} - -func (s *V4Signer) canonicalURI(u *url.URL) string { - u = &url.URL{Path: u.Path} - canonicalPath := u.String() - - slash := strings.HasSuffix(canonicalPath, "/") - canonicalPath = path.Clean(canonicalPath) - - if canonicalPath == "" || canonicalPath == "." { - canonicalPath = "/" - } - - if canonicalPath != "/" && slash { - canonicalPath += "/" - } - - return canonicalPath -} - -func (s *V4Signer) canonicalQueryString(u *url.URL) string { - var a []string - for k, vs := range u.Query() { - k = url.QueryEscape(k) - for _, v := range vs { - if v == "" { - a = append(a, k+"=") - } else { - v = url.QueryEscape(v) - a = append(a, k+"="+v) - } - } - } - sort.Strings(a) - return strings.Join(a, "&") -} - -func (s *V4Signer) canonicalHeaders(h http.Header) string { - i, a, lowerCase := 0, make([]string, len(h)), make(map[string][]string) - - for k, v := range h { - lowerCase[strings.ToLower(k)] = v - } - - var keys []string - for k := range lowerCase { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := lowerCase[k] - for j, w := range v { - v[j] = strings.Trim(w, " ") - } - sort.Strings(v) - a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",") - i++ - } - return strings.Join(a, "\n") -} - -func (s *V4Signer) signedHeaders(h http.Header) string { - i, a := 0, make([]string, len(h)) - for k, _ := range h { - a[i] = strings.ToLower(k) - i++ - } - sort.Strings(a) - return strings.Join(a, ";") -} - -func (s *V4Signer) payloadHash(req *http.Request) string { - var b []byte - if req.Body == nil { - b = []byte("") - } else { - var err error - b, err = ioutil.ReadAll(req.Body) - if err != nil { - // TODO: I REALLY DON'T LIKE THIS PANIC!!!! - panic(err) - } - } - req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - return s.hash(string(b)) -} - -/* -stringToSign method creates the string to sign according to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu) - - StringToSign = - Algorithm + '\n' + - RequestDate + '\n' + - CredentialScope + '\n' + - HexEncode(Hash(CanonicalRequest)) -*/ -func (s *V4Signer) stringToSign(t time.Time, creq string) string { - w := new(bytes.Buffer) - fmt.Fprint(w, "AWS4-HMAC-SHA256\n") - fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat)) - fmt.Fprintf(w, "%s\n", s.credentialScope(t)) - fmt.Fprintf(w, "%s", s.hash(creq)) - return w.String() -} - -func (s *V4Signer) credentialScope(t time.Time) string { - return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName) -} - -/* -signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process.
(http://goo.gl/j0Yqe1) - - signature = HexEncode(HMAC(derived-signing-key, string-to-sign)) -*/ -func (s *V4Signer) signature(t time.Time, sts string) string { - h := s.hmac(s.derivedKey(t), []byte(sts)) - return fmt.Sprintf("%x", h) -} - -/* -derivedKey method derives a signing key to be used for signing a request. - - kSecret = Your AWS Secret Access Key - kDate = HMAC("AWS4" + kSecret, Date) - kRegion = HMAC(kDate, Region) - kService = HMAC(kRegion, Service) - kSigning = HMAC(kService, "aws4_request") -*/ -func (s *V4Signer) derivedKey(t time.Time) []byte { - h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort))) - h = s.hmac(h, []byte(s.region.Name)) - h = s.hmac(h, []byte(s.serviceName)) - h = s.hmac(h, []byte("aws4_request")) - return h -} - -/* -authorization method generates the authorization header value. -*/ -func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string { - w := new(bytes.Buffer) - fmt.Fprint(w, "AWS4-HMAC-SHA256 ") - fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t)) - fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header)) - fmt.Fprintf(w, "Signature=%s", signature) - return w.String() -} - -// hash method calculates the sha256 hash for a given string -func (s *V4Signer) hash(in string) string { - h := sha256.New() - fmt.Fprintf(h, "%s", in) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// hmac method calculates the sha256 hmac for a given slice of bytes -func (s *V4Signer) hmac(key, data []byte) []byte { - h := hmac.New(sha256.New, key) - h.Write(data) - return h.Sum(nil) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign_test.go deleted file mode 100644 index 0f01bce3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/aws/sign_test.go +++ /dev/null @@ -1,569 +0,0 @@ -package aws_test - -import ( - "fmt" - "github.com/AdRoll/goamz/aws" - "gopkg.in/check.v1" - "net/http" - "strings" - "time" -) - -var _ = check.Suite(&V4SignerSuite{}) - -type V4SignerSuite struct { - auth aws.Auth - region aws.Region - cases []V4SignerSuiteCase -} - -type V4SignerSuiteCase struct { - label string - request V4SignerSuiteCaseRequest - canonicalRequest string - stringToSign string - signature string - authorization string -} - -type V4SignerSuiteCaseRequest struct { - method string - host string - url string - headers []string - body string -} - -func (s *V4SignerSuite) SetUpSuite(c *check.C) { - s.auth = aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"} - s.region = aws.USEast - - // Test cases from the Signature Version 4 Test Suite (http://goo.gl/nguvs0) - s.cases = append(s.cases, - - // get-header-key-duplicate - V4SignerSuiteCase{ - label: "get-header-key-duplicate", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar", "zoo:foobar", "zoo:zoobar"}, - }, - canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:foobar,zoobar,zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3c52f0eaae2b61329c0a332e3fa15842a37bc5812cf4d80eb64784308850e313", - signature: 
"54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed", - }, - - // get-header-value-order - V4SignerSuiteCase{ - label: "get-header-value-order", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p:z", "p:a", "p:p", "p:a"}, - }, - canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:a,a,p,z\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n94c0389fefe0988cbbedc8606f0ca0b485b48da010d09fc844b45b697c8924fe", - signature: "d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d", - }, - - // get-header-value-trim - V4SignerSuiteCase{ - label: "get-header-value-trim", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p: phfft "}, - }, - canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:phfft\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndddd1902add08da1ac94782b05f9278c08dc7468db178a84f8950d93b30b1f35", - signature: "debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592", - }, - - // get-empty - V4SignerSuiteCase{ - label: "get-relative-relative", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // get-single-relative - V4SignerSuiteCase{ - label: "get-relative-relative", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/.", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 
Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // get-multiple-relative - V4SignerSuiteCase{ - label: "get-relative-relative", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/./././", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // get-relative-relative - V4SignerSuiteCase{ - label: "get-relative-relative", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/foo/bar/../..", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // get-relative - V4SignerSuiteCase{ - label: "get-relative", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/foo/..", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // get-slash-dot-slash - V4SignerSuiteCase{ - label: "get-slash-dot-slash", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/./", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - 
}, - - // get-slash-pointless-dot - V4SignerSuiteCase{ - label: "get-slash-pointless-dot", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/./foo", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n8021a97572ee460f87ca67f4e8c0db763216d84715f5424a843a5312a3321e2d", - signature: "910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a", - }, - - // get-slash - V4SignerSuiteCase{ - label: "get-slash", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "//", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // get-slashes - V4SignerSuiteCase{ - label: "get-slashes", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "//foo//", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/foo/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n6bb4476ee8745730c9cb79f33a0c70baa6d8af29c0077fa12e4e8f1dd17e7098", - signature: "b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19", - }, - - // get-space - V4SignerSuiteCase{ - label: "get-space", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/%20/foo", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/%20/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n69c45fb9fe3fd76442b5086e50b2e9fec8298358da957b293ef26e506fdfb54b", - signature: "f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374", - }, - - // get-unreserved - V4SignerSuiteCase{ - label: "get-unreserved", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: 
"/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndf63ee3247c0356c696a3b21f8d8490b01fa9cd5bc6550ef5ef5f4636b7b8901", - signature: "830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e", - }, - - // get-utf8 - V4SignerSuiteCase{ - label: "get-utf8", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/%E1%88%B4", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/%E1%88%B4\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n27ba31df5dbc6e063d8f87d62eb07143f7f271c5330a917840586ac1c85b6f6b", - signature: "8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74", - }, - - // get-vanilla-empty-query-key - V4SignerSuiteCase{ - label: "get-vanilla-empty-query-key", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/?foo=bar", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n0846c2945b0832deb7a463c66af5c4f8bd54ec28c438e67a214445b157c9ddf8", - signature: "56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f", - }, - - // get-vanilla-query-order-key-case - V4SignerSuiteCase{ - label: "get-vanilla-query-order-key-case", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/?foo=Zoo&foo=aha", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\nfoo=Zoo&foo=aha\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ne25f777ba161a0f1baf778a87faf057187cf5987f17953320e3ca399feb5f00d", - signature: "be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09", - }, - - // get-vanilla-query-order-key - V4SignerSuiteCase{ - label: "get-vanilla-query-order-key", - request: V4SignerSuiteCaseRequest{ - 
method: "GET", - host: "host.foo.com", - url: "/?a=foo&b=foo", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\na=foo&b=foo\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n2f23d14fe13caebf6dfda346285c6d9c14f49eaca8f5ec55c627dd7404f7a727", - signature: "0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b", - }, - - // get-vanilla-query-order-value - V4SignerSuiteCase{ - label: "get-vanilla-query-order-value", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/?foo=b&foo=a", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\nfoo=a&foo=b\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n33dffc220e89131f8f6157a35c40903daa658608d9129ff9489e5cf5bbd9b11b", - signature: "feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc", - }, - - // get-vanilla-query-unreserved - V4SignerSuiteCase{ - label: "get-vanilla-query-unreserved", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nd2578f3156d4c9d180713d1ff20601d8a3eed0dd35447d24603d7d67414bd6b5", - signature: "f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb", - }, - - // get-vanilla-query - V4SignerSuiteCase{ - label: "get-vanilla-query", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, 
Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // get-vanilla-ut8-query - V4SignerSuiteCase{ - label: "get-vanilla-ut8-query", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/?ሴ=bar", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n%E1%88%B4=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nde5065ff39c131e6c2e2bd19cd9345a794bf3b561eab20b8d97b2093fc2a979e", - signature: "6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c", - }, - - // get-vanilla - V4SignerSuiteCase{ - label: "get-vanilla", - request: V4SignerSuiteCaseRequest{ - method: "GET", - host: "host.foo.com", - url: "/", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1", - signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470", - }, - - // post-header-key-case - V4SignerSuiteCase{ - label: "post-header-key-case", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4", - signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", - }, - - // post-header-key-sort - V4SignerSuiteCase{ - label: "post-header-key-sort", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar"}, - }, - canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n34e1bddeb99e76ee01d63b5e28656111e210529efeec6cdfd46a48e4c734545d", - signature: "b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a", - }, - - // post-header-value-case - V4SignerSuiteCase{ - 
label: "post-header-value-case", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "zoo:ZOOBAR"}, - }, - canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:ZOOBAR\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3aae6d8274b8c03e2cc96fc7d6bda4b9bd7a0a184309344470b2c96953e124aa", - signature: "273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7", - }, - - // post-vanilla-empty-query-value - V4SignerSuiteCase{ - label: "post-vanilla-empty-query-value", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/?foo=bar", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e", - signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", - }, - - // post-vanilla-query - V4SignerSuiteCase{ - label: "post-vanilla-query", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/?foo=bar", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e", - signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92", - }, - - // post-vanilla - V4SignerSuiteCase{ - label: "post-vanilla", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - }, - canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4", - signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726", - }, - - // post-x-www-form-urlencoded-parameters - V4SignerSuiteCase{ - label: "post-x-www-form-urlencoded-parameters", - request: V4SignerSuiteCaseRequest{ - 
method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"Content-Type:application/x-www-form-urlencoded; charset=utf8", "Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - body: "foo=bar", - }, - canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded; charset=utf8\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nc4115f9e54b5cecf192b1eaa23b8e88ed8dc5391bd4fde7b3fff3d9c9fe0af1f", - signature: "b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71", - }, - - // post-x-www-form-urlencoded - V4SignerSuiteCase{ - label: "post-x-www-form-urlencoded", - request: V4SignerSuiteCaseRequest{ - method: "POST", - host: "host.foo.com", - url: "/", - headers: []string{"Content-Type:application/x-www-form-urlencoded", "Date:Mon, 09 Sep 2011 23:36:00 GMT"}, - body: "foo=bar", - }, - canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a", - stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n4c5c6e4b52fb5fb947a8733982a8a5a61b14f04345cbfe6e739236c76dd48f74", - signature: "5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc", - authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc", - }, - ) -} - -func (s *V4SignerSuite) TestCases(c *check.C) { - signer := aws.NewV4Signer(s.auth, "host", s.region) - - for _, testCase := range s.cases { - - req, err := http.NewRequest(testCase.request.method, "http://"+testCase.request.host+testCase.request.url, strings.NewReader(testCase.request.body)) - c.Assert(err, check.IsNil, check.Commentf("Testcase: %s", testCase.label)) - for _, v := range testCase.request.headers { - h := strings.SplitN(v, ":", 2) - req.Header.Add(h[0], h[1]) - } - req.Header.Set("host", req.Host) - - t := signer.RequestTime(req) - - canonicalRequest := signer.CanonicalRequest(req) - c.Check(canonicalRequest, check.Equals, testCase.canonicalRequest, check.Commentf("Testcase: %s", testCase.label)) - - stringToSign := signer.StringToSign(t, canonicalRequest) - c.Check(stringToSign, check.Equals, testCase.stringToSign, check.Commentf("Testcase: %s", testCase.label)) - - signature := signer.Signature(t, stringToSign) - c.Check(signature, check.Equals, testCase.signature, check.Commentf("Testcase: %s", testCase.label)) - - authorization := signer.Authorization(req.Header, t, signature) - c.Check(authorization, check.Equals, testCase.authorization, check.Commentf("Testcase: %s", testCase.label)) - - signer.Sign(req) - c.Check(req.Header.Get("Authorization"), check.Equals, testCase.authorization, check.Commentf("Testcase: %s", testCase.label)) - } -} - -func ExampleV4Signer() { - // Get auth from env vars - auth, err := aws.EnvAuth() - if err != nil { - fmt.Println(err) - } - - // Create a signer with the auth, name of the service, and aws region - signer := aws.NewV4Signer(auth, "dynamodb", aws.USEast) - - // 
Create a request - req, err := http.NewRequest("POST", aws.USEast.DynamoDBEndpoint, strings.NewReader("sample_request")) - if err != nil { - fmt.Println(err) - } - - // Date or x-amz-date header is required to sign a request - req.Header.Add("Date", time.Now().UTC().Format(http.TimeFormat)) - - // Sign the request - signer.Sign(req) - - // Issue signed request - http.DefaultClient.Do(req) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront.go deleted file mode 100644 index b845d3c5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront.go +++ /dev/null @@ -1,143 +0,0 @@ -package cloudfront - -import ( - "crypto" - "crypto/rsa" - "crypto/sha1" - "encoding/base64" - "encoding/json" - "fmt" - "github.com/AdRoll/goamz/aws" - "net/url" - "strconv" - "strings" - "time" -) - -type CloudFront struct { - BaseURL string - keyPairId string - key *rsa.PrivateKey -} - -var base64Replacer = strings.NewReplacer("=", "_", "+", "-", "/", "~") - -func NewKeyLess(auth aws.Auth, baseurl string) *CloudFront { - return &CloudFront{keyPairId: auth.AccessKey, BaseURL: baseurl} -} - -func New(baseurl string, key *rsa.PrivateKey, keyPairId string) *CloudFront { - return &CloudFront{ - BaseURL: baseurl, - keyPairId: keyPairId, - key: key, - } -} - -type epochTime struct { - EpochTime int64 `json:"AWS:EpochTime"` -} - -type condition struct { - DateLessThan epochTime -} - -type statement struct { - Resource string - Condition condition -} - -type policy struct { - Statement []statement -} - -func buildPolicy(resource string, expireTime time.Time) ([]byte, error) { - p := &policy{ - Statement: []statement{ - statement{ - Resource: resource, - Condition: condition{ - DateLessThan: epochTime{ - EpochTime: expireTime.Truncate(time.Millisecond).Unix(), - }, - }, - }, - }, - } - - return json.Marshal(p) -} - -func (cf *CloudFront) generateSignature(policy []byte) (string, error) { - hash := sha1.New() - _, err := hash.Write(policy) - if err != nil { - return "", err - } - - hashed := hash.Sum(nil) - var signed []byte - if cf.key.Validate() == nil { - signed, err = rsa.SignPKCS1v15(nil, cf.key, crypto.SHA1, hashed) - if err != nil { - return "", err - } - } else { - signed = hashed - } - encoded := base64Replacer.Replace(base64.StdEncoding.EncodeToString(signed)) - - return encoded, nil -} - -// Creates a signed url using RSAwithSHA1 as specified by -// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-signature -func (cf *CloudFront) CannedSignedURL(path, queryString string, expires time.Time) (string, error) { - resource := cf.BaseURL + path - if queryString != "" { - resource = path + "?" 
+ queryString - } - - policy, err := buildPolicy(resource, expires) - if err != nil { - return "", err - } - - signature, err := cf.generateSignature(policy) - if err != nil { - return "", err - } - - // TODO: Do this once - uri, err := url.Parse(cf.BaseURL) - if err != nil { - return "", err - } - - uri.RawQuery = queryString - if queryString != "" { - uri.RawQuery += "&" - } - - expireTime := expires.Truncate(time.Millisecond).Unix() - - uri.Path = path - uri.RawQuery += fmt.Sprintf("Expires=%d&Signature=%s&Key-Pair-Id=%s", expireTime, signature, cf.keyPairId) - - return uri.String(), nil -} - -func (cloudfront *CloudFront) SignedURL(path, querystrings string, expires time.Time) string { - policy := `{"Statement":[{"Resource":"` + path + "?" + querystrings + `","Condition":{"DateLessThan":{"AWS:EpochTime":` + strconv.FormatInt(expires.Truncate(time.Millisecond).Unix(), 10) + `}}}]}` - - hash := sha1.New() - hash.Write([]byte(policy)) - b := hash.Sum(nil) - he := base64.StdEncoding.EncodeToString(b) - - policySha1 := he - - url := cloudfront.BaseURL + path + "?" + querystrings + "&Expires=" + strconv.FormatInt(expires.Unix(), 10) + "&Signature=" + policySha1 + "&Key-Pair-Id=" + cloudfront.keyPairId - - return url -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront_test.go deleted file mode 100644 index 63744d1c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/cloudfront_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package cloudfront - -import ( - "crypto/x509" - "encoding/pem" - "io/ioutil" - "net/url" - "testing" - "time" -) - -func TestSignedCannedURL(t *testing.T) { - rawKey, err := ioutil.ReadFile("testdata/key.pem") - if err != nil { - t.Fatal(err) - } - - pemKey, _ := pem.Decode(rawKey) - privateKey, err := x509.ParsePKCS1PrivateKey(pemKey.Bytes) - if err != nil { - t.Fatal(err) - } - - cf := &CloudFront{ - key: privateKey, - keyPairId: "test-key-pair-1231245", - BaseURL: "https://cloudfront.com", - } - - expireTime, err := time.Parse(time.RFC3339, "2014-03-28T14:00:21Z") - if err != nil { - t.Fatal(err) - } - - query := make(url.Values) - query.Add("test", "value") - - uri, err := cf.CannedSignedURL("test", "test=value", expireTime) - if err != nil { - t.Fatal(err) - } - - parsed, err := url.Parse(uri) - if err != nil { - t.Fatal(err) - } - - signature := parsed.Query().Get("Signature") - if signature == "" { - t.Fatal("Encoded signature is empty") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/testdata/key.pub b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/testdata/key.pub deleted file mode 100644 index 7d0b5b4d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/cloudfront/testdata/key.pub +++ /dev/null @@ -1,6 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC0yMzp9DkPAE99DhsEaGkqougL -vtmDKri4bZj0fFjmGmjyyjz9hlrsr87LHVWzH/7igK7040HG1UqypX3ijtJa9+6B -KHwBBctboU3y4GfwFwVAOumY9UytFpyPlgUFrffZLQAywKkT24OgcfEj0G5kiQn7 -60wFnmSUtOuITo708QIDAQAB ------END PUBLIC KEY----- diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/export_test.go 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/export_test.go deleted file mode 100644 index 80f62558..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/export_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package s3 - -import ( - "github.com/AdRoll/goamz/aws" -) - -var originalStrategy = attempts - -func SetAttemptStrategy(s *aws.AttemptStrategy) { - if s == nil { - attempts = originalStrategy - } else { - attempts = *s - } -} - -func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) { - sign(auth, method, path, params, headers) -} - -func SetListPartsMax(n int) { - listPartsMax = n -} - -func SetListMultiMax(n int) { - listMultiMax = n -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle.go deleted file mode 100644 index d9281261..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle.go +++ /dev/null @@ -1,202 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "encoding/xml" - "net/url" - "strconv" - "time" -) - -// Implements an interface for s3 bucket lifecycle configuration -// See goo.gl/d0bbDf for details. - -const ( - LifecycleRuleStatusEnabled = "Enabled" - LifecycleRuleStatusDisabled = "Disabled" - LifecycleRuleDateFormat = "2006-01-02" - StorageClassGlacier = "GLACIER" -) - -type Expiration struct { - Days *uint `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` -} - -// Returns Date as a time.Time. -func (r *Expiration) ParseDate() (time.Time, error) { - return time.Parse(LifecycleRuleDateFormat, r.Date) -} - -type Transition struct { - Days *uint `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` - StorageClass string `xml:"StorageClass"` -} - -// Returns Date as a time.Time. -func (r *Transition) ParseDate() (time.Time, error) { - return time.Parse(LifecycleRuleDateFormat, r.Date) -} - -type NoncurrentVersionExpiration struct { - Days *uint `xml:"NoncurrentDays,omitempty"` -} - -type NoncurrentVersionTransition struct { - Days *uint `xml:"NoncurrentDays,omitempty"` - StorageClass string `xml:"StorageClass"` -} - -type LifecycleRule struct { - ID string `xml:"ID"` - Prefix string `xml:"Prefix"` - Status string `xml:"Status"` - NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` - NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` - Transition *Transition `xml:"Transition,omitempty"` - Expiration *Expiration `xml:"Expiration,omitempty"` -} - -// Create a lifecycle rule with arbitrary identifier id and object name prefix -// for which the rules should apply. -func NewLifecycleRule(id, prefix string) *LifecycleRule { - rule := &LifecycleRule{ - ID: id, - Prefix: prefix, - Status: LifecycleRuleStatusEnabled, - } - return rule -} - -// Adds a transition rule in days. Overwrites any previous transition rule. -func (r *LifecycleRule) SetTransitionDays(days uint) { - r.Transition = &Transition{ - Days: &days, - StorageClass: StorageClassGlacier, - } -} - -// Adds a transition rule as a date. Overwrites any previous transition rule. 
-func (r *LifecycleRule) SetTransitionDate(date time.Time) { - r.Transition = &Transition{ - Date: date.Format(LifecycleRuleDateFormat), - StorageClass: StorageClassGlacier, - } -} - -// Adds an expiration rule in days. Overwrites any previous expiration rule. -// Days must be > 0. -func (r *LifecycleRule) SetExpirationDays(days uint) { - r.Expiration = &Expiration{ - Days: &days, - } -} - -// Adds an expiration rule as a date. Overwrites any previous expiration rule. -func (r *LifecycleRule) SetExpirationDate(date time.Time) { - r.Expiration = &Expiration{ - Date: date.Format(LifecycleRuleDateFormat), - } -} - -// Adds a noncurrent version transition rule. Overwrites any previous -// noncurrent version transition rule. -func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) { - r.NoncurrentVersionTransition = &NoncurrentVersionTransition{ - Days: &days, - StorageClass: StorageClassGlacier, - } -} - -// Adds a noncurrent version expiration rule. Days must be > 0. Overwrites -// any previous noncurrent version expiration rule. -func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) { - r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{ - Days: &days, - } -} - -// Marks the rule as disabled. -func (r *LifecycleRule) Disable() { - r.Status = LifecycleRuleStatusDisabled -} - -// Marks the rule as enabled (default). -func (r *LifecycleRule) Enable() { - r.Status = LifecycleRuleStatusEnabled -} - -type LifecycleConfiguration struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules *[]*LifecycleRule `xml:"Rule,omitempty"` -} - -// Adds a LifecycleRule to the configuration. -func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) { - var rules []*LifecycleRule - if c.Rules != nil { - rules = *c.Rules - } - rules = append(rules, r) - c.Rules = &rules -} - -// Sets the bucket's lifecycle configuration. -func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error { - doc, err := xml.Marshal(c) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(int64(size), 10)}, - "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, - } - - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: buf, - params: url.Values{"lifecycle": {""}}, - } - - return b.S3.queryV4Sign(req, nil) -} - -// Retrieves the lifecycle configuration for the bucket. AWS returns an error -// if no lifecycle found. -func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) { - req := &request{ - method: "GET", - bucket: b.Name, - path: "/", - params: url.Values{"lifecycle": {""}}, - } - - conf := &LifecycleConfiguration{} - err := b.S3.queryV4Sign(req, conf) - return conf, err -} - -// Delete the bucket's lifecycle configuration. 
-func (b *Bucket) DeleteLifecycleConfiguration() error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - params: url.Values{"lifecycle": {""}}, - } - - return b.S3.queryV4Sign(req, nil) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle_test.go deleted file mode 100644 index e43acb8f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/lifecycle_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package s3_test - -import ( - "encoding/xml" - "github.com/AdRoll/goamz/s3" - "gopkg.in/check.v1" - "io/ioutil" - "net/http" - "strings" - "time" -) - -func (s *S) TestLifecycleConfiguration(c *check.C) { - date, err := time.Parse(s3.LifecycleRuleDateFormat, "2014-09-10") - c.Check(err, check.IsNil) - - conf := &s3.LifecycleConfiguration{} - - rule := s3.NewLifecycleRule("transition-days", "/") - rule.SetTransitionDays(7) - conf.AddRule(rule) - - rule = s3.NewLifecycleRule("transition-date", "/") - rule.SetTransitionDate(date) - conf.AddRule(rule) - - rule = s3.NewLifecycleRule("expiration-days", "") - rule.SetExpirationDays(1) - conf.AddRule(rule) - - rule = s3.NewLifecycleRule("expiration-date", "") - rule.SetExpirationDate(date) - conf.AddRule(rule) - - rule = s3.NewLifecycleRule("noncurrent-transition", "") - rule.SetNoncurrentVersionTransitionDays(11) - conf.AddRule(rule) - - rule = s3.NewLifecycleRule("noncurrent-expiration", "") - rule.SetNoncurrentVersionExpirationDays(1011) - - // Test Disable() and Enable() toggling - c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusEnabled) - rule.Disable() - c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusDisabled) - rule.Enable() - c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusEnabled) - rule.Disable() - c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusDisabled) - - conf.AddRule(rule) - - doc, err := xml.MarshalIndent(conf, "", " ") - c.Check(err, check.IsNil) - - expectedDoc := `<LifecycleConfiguration> - <Rule> - <ID>transition-days</ID> - <Prefix>/</Prefix> - <Status>Enabled</Status> - <Transition> - <Days>7</Days> - <StorageClass>GLACIER</StorageClass> - </Transition> - </Rule> - <Rule> - <ID>transition-date</ID> - <Prefix>/</Prefix> - <Status>Enabled</Status> - <Transition> - <Date>2014-09-10</Date> - <StorageClass>GLACIER</StorageClass> - </Transition> - </Rule> - <Rule> - <ID>expiration-days</ID> - <Prefix></Prefix> - <Status>Enabled</Status> - <Expiration> - <Days>1</Days> - </Expiration> - </Rule> - <Rule> - <ID>expiration-date</ID> - <Prefix></Prefix> - <Status>Enabled</Status> - <Expiration> - <Date>2014-09-10</Date> - </Expiration> - </Rule> - <Rule> - <ID>noncurrent-transition</ID> - <Prefix></Prefix> - <Status>Enabled</Status> - <NoncurrentVersionTransition> - <NoncurrentDays>11</NoncurrentDays> - <StorageClass>GLACIER</StorageClass> - </NoncurrentVersionTransition> - </Rule> - <Rule> - <ID>noncurrent-expiration</ID> - <Prefix></Prefix> - <Status>Disabled</Status> - <NoncurrentVersionExpiration> - <NoncurrentDays>1011</NoncurrentDays> - </NoncurrentVersionExpiration> - </Rule> -</LifecycleConfiguration>` - - c.Check(string(doc), check.Equals, expectedDoc) - - // Unmarshalling test - conf2 := &s3.LifecycleConfiguration{} - err = xml.Unmarshal(doc, conf2) - c.Check(err, check.IsNil) - s.checkLifecycleConfigurationEqual(c, conf, conf2) -} - -func (s *S) checkLifecycleConfigurationEqual(c *check.C, conf, conf2 *s3.LifecycleConfiguration) { - c.Check(len(*conf2.Rules), check.Equals, len(*conf.Rules)) - for i, rule := range *conf2.Rules { - confRules := *conf.Rules - c.Check(rule, check.DeepEquals, confRules[i]) - } -} - -func (s *S) checkLifecycleRequest(c *check.C, req *http.Request) { - // ?lifecycle= is the only query param - v, ok := req.Form["lifecycle"] - c.Assert(ok, check.Equals, true) - c.Assert(v, check.HasLen, 1) - c.Assert(v[0], check.Equals, "") - - c.Assert(req.Header["X-Amz-Date"], check.HasLen, 1) - c.Assert(req.Header["X-Amz-Date"][0], check.Not(check.Equals), "") - - // Lifecycle methods require V4 auth - usesV4 := strings.HasPrefix(req.Header["Authorization"][0], "AWS4-HMAC-SHA256") - c.Assert(usesV4, check.Equals, true) -} - -func (s *S) TestPutLifecycleConfiguration(c *check.C) 
{ - testServer.Response(200, nil, "") - - conf := &s3.LifecycleConfiguration{} - rule := s3.NewLifecycleRule("id", "") - rule.SetTransitionDays(7) - conf.AddRule(rule) - - doc, err := xml.Marshal(conf) - c.Check(err, check.IsNil) - - b := s.s3.Bucket("bucket") - err = b.PutLifecycleConfiguration(conf) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/bucket/") - c.Assert(req.Header["Content-Md5"], check.HasLen, 1) - c.Assert(req.Header["Content-Md5"][0], check.Not(check.Equals), "") - s.checkLifecycleRequest(c, req) - - // Check we sent the correct xml serialization - data, err := ioutil.ReadAll(req.Body) - req.Body.Close() - c.Assert(err, check.IsNil) - header := "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" - c.Assert(string(data), check.Equals, header+string(doc)) -} - -func (s *S) TestGetLifecycleConfiguration(c *check.C) { - conf := &s3.LifecycleConfiguration{} - rule := s3.NewLifecycleRule("id", "") - rule.SetTransitionDays(7) - conf.AddRule(rule) - - doc, err := xml.Marshal(conf) - c.Check(err, check.IsNil) - - testServer.Response(200, nil, string(doc)) - - b := s.s3.Bucket("bucket") - conf2, err := b.GetLifecycleConfiguration() - c.Check(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/bucket/") - s.checkLifecycleRequest(c, req) - s.checkLifecycleConfigurationEqual(c, conf, conf2) -} - -func (s *S) TestDeleteLifecycleConfiguration(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.DeleteLifecycleConfiguration() - c.Check(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "DELETE") - c.Assert(req.URL.Path, check.Equals, "/bucket/") - s.checkLifecycleRequest(c, req) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi.go deleted file mode 100644 index ff454265..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi.go +++ /dev/null @@ -1,502 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - "net/url" - "sort" - "strconv" - "strings" -) - -// Multi represents an unfinished multipart upload. - -// -// Multipart uploads allow sending big objects in smaller chunks. -// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. -// -// See http://goo.gl/vJfTG for an overview of multipart uploads. -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. -var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. - -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). - -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. 
You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// See http://goo.gl/ePioY for details. -func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - params := map[string][]string{ - "uploads": {""}, - "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, - "prefix": {prefix}, - "delimiter": {delim}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: b.Name, - params: params, - } - var resp listMultiResp - err := b.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, nil, err - } - for i := range resp.Upload { - multi := &resp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, resp.CommonPrefixes...) - if !resp.IsTruncated { - return multis, prefixes, nil - } - params["key-marker"] = []string{resp.NextKeyMarker} - params["upload-id-marker"] = []string{resp.NextUploadIdMarker} - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. -func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm, options) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -// See http://goo.gl/XP8kL for details. 
-func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { - headers := map[string][]string{ - "Content-Type": {contType}, - "Content-Length": {"0"}, - "x-amz-acl": {string(perm)}, - } - options.addHeaders(headers) - params := map[string][]string{ - "uploads": {""}, - } - req := &request{ - method: "POST", - bucket: b.Name, - path: key, - headers: headers, - params: params, - } - var err error - var resp struct { - UploadId string `xml:"UploadId"` - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, &resp) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil -} - -func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { - headers := map[string][]string{ - "x-amz-copy-source": {url.QueryEscape(source)}, - } - options.addHeaders(headers) - params := map[string][]string{ - "uploadId": {m.UploadId}, - "partNumber": {strconv.FormatInt(int64(n), 10)}, - } - - sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/")) - sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil) - if err != nil { - return nil, Part{}, err - } - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - } - resp := &CopyObjectResult{} - err = m.Bucket.S3.query(req, resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, Part{}, err - } - if resp.ETag == "" { - return nil, Part{}, errors.New("part upload succeeded with no ETag") - } - return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil - } - panic("unreachable") -} - -// PutPart sends part n of the multipart upload, reading all the content from r. -// Each part, except for the last one, must be at least 5MB in size. -// -// See http://goo.gl/pqZer for details. 
-func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(partSize, 10)}, - "Content-MD5": {md5b64}, - } - params := map[string][]string{ - "uploadId": {m.UploadId}, - "partNumber": {strconv.FormatInt(int64(n), 10)}, - } - for attempt := attempts.Start(); attempt.Next(); { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - payload: r, - } - err = m.Bucket.S3.prepare(req) - if err != nil { - return Part{}, err - } - resp, err := m.Bucket.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return Part{}, err - } - etag := resp.Header.Get("ETag") - if etag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - return Part{n, etag, partSize}, nil - } - panic("unreachable") -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's the default. Here just for testing. -var listPartsMax = 1000 - -// Kept for backwards compatibility. See the documentation for ListPartsFull. -func (m *Multi) ListParts() ([]Part, error) { - return m.ListPartsFull(0, listPartsMax) -} - -// ListPartsFull returns the list of previously uploaded parts in m, -// ordered by part number (Only parts with higher part numbers than -// partNumberMarker will be listed). Only up to maxParts parts will be -// returned. -// -// See http://goo.gl/ePioY for details. -func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { - if maxParts > listPartsMax { - maxParts = listPartsMax - } - - params := map[string][]string{ - "uploadId": {m.UploadId}, - "max-parts": {strconv.FormatInt(int64(maxParts), 10)}, - "part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)}, - } - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - var resp listPartsResp - err := b.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - parts = append(parts, resp.Part...) - if !resp.IsTruncated { - sort.Sort(parts) - return parts, nil - } - params["part-number-marker"] = []string{resp.NextPartNumberMarker} - attempt = attempts.Start() // Last request worked. 
- } - panic("unreachable") -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. - var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// We can't know in advance whether we'll have an Error or a -// CompleteMultipartUploadResult, so this structure is just a placeholder to -// know the name of the XML object. -type completeUploadResp struct { - XMLName xml.Name - InnerXML string `xml:",innerxml"` -} - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. -// -// See http://goo.gl/2Z7Tw for details. -func (m *Multi) Complete(parts []Part) error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - c := completeUpload{} - for _, p := range parts { - c.Parts = append(c.Parts, completePart{p.N, p.ETag}) - } - sort.Sort(c.Parts) - data, err := xml.Marshal(&c) - if err != nil { - return err - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "POST", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - payload: bytes.NewReader(data), - } - var resp completeUploadResp - err := m.Bucket.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - return err - } - - // A 200 error code does not guarantee that there were no errors (see - // http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html ), - // so first figure out what kind of XML "object" we are dealing with. 
- - if resp.XMLName.Local == "Error" { - // S3.query does the unmarshalling for us, so we can't unmarshal - // again into a different struct... So we need to duct-tape the - // original XML back together. - fullErrorXml := "<Error>" + resp.InnerXML + "</Error>" - s3err := &Error{} - - if err := xml.Unmarshal([]byte(fullErrorXml), s3err); err != nil { - return err - } - - return s3err - } - - if resp.XMLName.Local == "CompleteMultipartUploadResult" { - // FIXME: One could probably add a CompleteFull method returning the - // actual contents of the CompleteMultipartUploadResult object. - return nil - } - - return errors.New("Invalid XML struct returned: " + resp.XMLName.Local) - } - panic("unreachable") -} - -// Abort deletes an unfinished multipart upload and any previously -// uploaded parts for it. -// -// After a multipart upload is aborted, no additional parts can be -// uploaded using it. However, if any part uploads are currently in -// progress, those part uploads might or might not succeed. As a result, -// it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// NOTE: If the described scenario happens to you, please report back to -// the goamz authors with details. In the future such retrying should be -// handled internally, but it's not clear what happens precisely (Is an -// error returned? Is the issue completely undetectable?). -// -// See http://goo.gl/dnyJw for details. -func (m *Multi) Abort() error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - err := m.Bucket.S3.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi_test.go deleted file mode 100644 index 8429d336..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/multi_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package s3_test - -import ( - "encoding/xml" - "github.com/AdRoll/goamz/s3" - "gopkg.in/check.v1" - "io" - "io/ioutil" - "strings" -) - -func (s *S) TestInitMulti(c *check.C) { - testServer.Response(200, nil, InitMultiResultDump) - b := s.s3.Bucket("sample") - - metadata := make(map[string][]string) - metadata["key1"] = []string{"value1"} - metadata["key2"] = []string{"value2"} - options := s3.Options{ - SSE: true, - Meta: metadata, - ContentEncoding: "text/utf8", - CacheControl: "no-cache", - RedirectLocation: "http://github.com/AdRoll/goamz", - ContentMD5: "0000000000000000", - } - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, options) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "POST") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"text/plain"}) - c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"}) - c.Assert(req.Form["uploads"], check.DeepEquals, []string{""}) - - c.Assert(req.Header["X-Amz-Server-Side-Encryption"], check.DeepEquals, []string{"AES256"}) - c.Assert(req.Header["Content-Encoding"], check.DeepEquals, []string{"text/utf8"}) - c.Assert(req.Header["Cache-Control"], 
check.DeepEquals, []string{"no-cache"}) - c.Assert(req.Header["Content-Md5"], check.DeepEquals, []string{"0000000000000000"}) - c.Assert(req.Header["X-Amz-Website-Redirect-Location"], check.DeepEquals, []string{"http://github.com/AdRoll/goamz"}) - c.Assert(req.Header["X-Amz-Meta-Key1"], check.DeepEquals, []string{"value1"}) - c.Assert(req.Header["X-Amz-Meta-Key2"], check.DeepEquals, []string{"value2"}) - - c.Assert(multi.UploadId, check.Matches, "JNbR_[A-Za-z0-9.]+QQ--") -} - -func (s *S) TestMultiNoPreviousUpload(c *check.C) { - // Don't retry the NoSuchUpload error. - s.DisableRetries() - - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, nil, InitMultiResultDump) - - b := s.s3.Bucket("sample") - - multi, err := b.Multi("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/sample/") - c.Assert(req.Form["uploads"], check.DeepEquals, []string{""}) - c.Assert(req.Form["prefix"], check.DeepEquals, []string{"multi"}) - - req = testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "POST") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form["uploads"], check.DeepEquals, []string{""}) - - c.Assert(multi.UploadId, check.Matches, "JNbR_[A-Za-z0-9.]+QQ--") -} - -func (s *S) TestMultiReturnOld(c *check.C) { - testServer.Response(200, nil, ListMultiResultDump) - - b := s.s3.Bucket("sample") - - multi, err := b.Multi("multi1", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - c.Assert(multi.Key, check.Equals, "multi1") - c.Assert(multi.UploadId, check.Equals, "iUVug89pPvSswrikD") - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/sample/") - c.Assert(req.Form["uploads"], check.DeepEquals, []string{""}) - c.Assert(req.Form["prefix"], check.DeepEquals, []string{"multi1"}) -} - -func (s *S) TestListParts(c *check.C) { - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, ListPartsResultDump1) - testServer.Response(404, nil, NoSuchUploadErrorDump) // :-( - testServer.Response(200, nil, ListPartsResultDump2) - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - parts, err := multi.ListParts() - c.Assert(err, check.IsNil) - c.Assert(parts, check.HasLen, 3) - c.Assert(parts[0].N, check.Equals, 1) - c.Assert(parts[0].Size, check.Equals, int64(5)) - c.Assert(parts[0].ETag, check.Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) - c.Assert(parts[1].N, check.Equals, 2) - c.Assert(parts[1].Size, check.Equals, int64(5)) - c.Assert(parts[1].ETag, check.Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`) - c.Assert(parts[2].N, check.Equals, 3) - c.Assert(parts[2].Size, check.Equals, int64(5)) - c.Assert(parts[2].ETag, check.Equals, `"49dcd91231f801159e893fb5c6674985"`) - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["max-parts"], check.DeepEquals, []string{"1000"}) - - testServer.WaitRequest() // The internal error. 
- req = testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["max-parts"], check.DeepEquals, []string{"1000"}) - c.Assert(req.Form["part-number-marker"], check.DeepEquals, []string{"2"}) -} - -func (s *S) TestPutPart(c *check.C) { - headers := map[string]string{ - "ETag": `"26f90efd10d614f100252ff56d88dad8"`, - } - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, headers, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - part, err := multi.PutPart(1, strings.NewReader("")) - c.Assert(err, check.IsNil) - c.Assert(part.N, check.Equals, 1) - c.Assert(part.Size, check.Equals, int64(8)) - c.Assert(part.ETag, check.Equals, headers["ETag"]) - - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"8"}) - c.Assert(req.Header["Content-Md5"], check.DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="}) -} - -func (s *S) TestPutPartCopy(c *check.C) { - testServer.Response(200, nil, InitMultiResultDump) - // PutPartCopy makes a Head request internally to verify access to the source object - // and obtain its size - testServer.Response(200, nil, "content") - testServer.Response(200, nil, PutCopyResultDump) - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - res, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, "source-bucket/\u00FCber-fil\u00E9.jpg") - c.Assert(err, check.IsNil) - c.Assert(part.N, check.Equals, 1) - c.Assert(part.Size, check.Equals, int64(7)) - c.Assert(res, check.DeepEquals, &s3.CopyObjectResult{ - ETag: `"9b2cf535f27731c974343645a3985328"`, - LastModified: `2009-10-28T22:32:00`}) - - // Verify the Head request - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "POST") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") - c.Assert(err, check.IsNil) - - testServer.WaitRequest() - req = testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--") - c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"}) - c.Assert(req.Header["X-Amz-Copy-Source"], check.DeepEquals, []string{`source-bucket%2F%C3%BCber-fil%C3%A9.jpg`}) -} - -func readAll(r io.Reader) string { - data, err := ioutil.ReadAll(r) - if err != nil { - panic(err) - } - return string(data) -} - -func (s *S) TestPutAllNoPreviousUpload(c *check.C) { - // Don't retry the NoSuchUpload error. 
- s.DisableRetries() - - etag1 := map[string]string{"ETag": `"etag1"`} - etag2 := map[string]string{"ETag": `"etag2"`} - etag3 := map[string]string{"ETag": `"etag3"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, etag1, "") - testServer.Response(200, etag2, "") - testServer.Response(200, etag3, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5) - c.Assert(parts, check.HasLen, 3) - c.Assert(parts[0].ETag, check.Equals, `"etag1"`) - c.Assert(parts[1].ETag, check.Equals, `"etag2"`) - c.Assert(parts[2].ETag, check.Equals, `"etag3"`) - c.Assert(err, check.IsNil) - - // Init - testServer.WaitRequest() - - // List old parts. Won't find anything. - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - - // Send part 1. - req = testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), check.Equals, "part1") - - // Send part 2. - req = testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"2"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), check.Equals, "part2") - - // Send part 3 with shorter body. - req = testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"3"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"4"}) - c.Assert(readAll(req.Body), check.Equals, "last") -} - -func (s *S) TestPutAllZeroSizeFile(c *check.C) { - // Don't retry the NoSuchUpload error. - s.DisableRetries() - - etag1 := map[string]string{"ETag": `"etag1"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(404, nil, NoSuchUploadErrorDump) - testServer.Response(200, etag1, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - // Must send at least one part, so that completing it will work. - parts, err := multi.PutAll(strings.NewReader(""), 5) - c.Assert(parts, check.HasLen, 1) - c.Assert(parts[0].ETag, check.Equals, `"etag1"`) - c.Assert(err, check.IsNil) - - // Init - testServer.WaitRequest() - - // List old parts. Won't find anything. - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - - // Send empty part. 
- req = testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"0"}) - c.Assert(readAll(req.Body), check.Equals, "") -} - -func (s *S) TestPutAllResume(c *check.C) { - etag2 := map[string]string{"ETag": `"etag2"`} - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, ListPartsResultDump1) - testServer.Response(200, nil, ListPartsResultDump2) - testServer.Response(200, etag2, "") - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - // "part1" and "part3" match the checksums in ResultDump1. - // The middle one is a mismatch (it refers to "part2"). - parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5) - c.Assert(parts, check.HasLen, 3) - c.Assert(parts[0].N, check.Equals, 1) - c.Assert(parts[0].Size, check.Equals, int64(5)) - c.Assert(parts[0].ETag, check.Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`) - c.Assert(parts[1].N, check.Equals, 2) - c.Assert(parts[1].Size, check.Equals, int64(5)) - c.Assert(parts[1].ETag, check.Equals, `"etag2"`) - c.Assert(parts[2].N, check.Equals, 3) - c.Assert(parts[2].Size, check.Equals, int64(5)) - c.Assert(parts[2].ETag, check.Equals, `"49dcd91231f801159e893fb5c6674985"`) - c.Assert(err, check.IsNil) - - // Init - testServer.WaitRequest() - - // List old parts, broken in two requests. - for i := 0; i < 2; i++ { - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - } - - // Send part 2, as it didn't match the checksum. - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"2"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"}) - c.Assert(readAll(req.Body), check.Equals, "partX") -} - -func (s *S) TestMultiComplete(c *check.C) { - testServer.Response(200, nil, InitMultiResultDump) - testServer.Response(200, nil, MultiCompleteDump) - - b := s.s3.Bucket("sample") - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}}) - c.Assert(err, check.IsNil) - - testServer.WaitRequest() - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "POST") - c.Assert(req.URL.Path, check.Equals, "/sample/multi") - c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--") - - var payload struct { - XMLName xml.Name - Part []struct { - PartNumber int - ETag string - } - } - - dec := xml.NewDecoder(req.Body) - err = dec.Decode(&payload) - c.Assert(err, check.IsNil) - - c.Assert(payload.XMLName.Local, check.Equals, "CompleteMultipartUpload") - c.Assert(len(payload.Part), check.Equals, 2) - c.Assert(payload.Part[0].PartNumber, check.Equals, 1) - c.Assert(payload.Part[0].ETag, check.Equals, `"ETag1"`) - c.Assert(payload.Part[1].PartNumber, check.Equals, 2) - c.Assert(payload.Part[1].ETag, check.Equals, `"ETag2"`) -} - -func (s *S) TestMultiCompleteError(c *check.C) { - testServer.Response(200, nil, InitMultiResultDump) - // Note the 200 response. 
Completing will hold the connection on some
- // kind of long poll, and may return a late error even after a 200.
- testServer.Response(200, nil, InternalErrorDump)
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
- c.Assert(err, check.IsNil)
-
- err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
- c.Assert(err, check.NotNil)
-
- testServer.WaitRequest()
- testServer.WaitRequest()
-}
-
-func (s *S) TestMultiAbort(c *check.C) {
- testServer.Response(200, nil, InitMultiResultDump)
- testServer.Response(200, nil, "")
-
- b := s.s3.Bucket("sample")
-
- multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
- c.Assert(err, check.IsNil)
-
- err = multi.Abort()
- c.Assert(err, check.IsNil)
-
- testServer.WaitRequest()
- req := testServer.WaitRequest()
- c.Assert(req.Method, check.Equals, "DELETE")
- c.Assert(req.URL.Path, check.Equals, "/sample/multi")
- c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
-}
-
-func (s *S) TestListMulti(c *check.C) {
- testServer.Response(200, nil, ListMultiResultDump)
-
- b := s.s3.Bucket("sample")
-
- multis, prefixes, err := b.ListMulti("", "/")
- c.Assert(err, check.IsNil)
- c.Assert(prefixes, check.DeepEquals, []string{"a/", "b/"})
- c.Assert(multis, check.HasLen, 2)
- c.Assert(multis[0].Key, check.Equals, "multi1")
- c.Assert(multis[0].UploadId, check.Equals, "iUVug89pPvSswrikD")
- c.Assert(multis[1].Key, check.Equals, "multi2")
- c.Assert(multis[1].UploadId, check.Equals, "DkirwsSvPp98guVUi")
-
- req := testServer.WaitRequest()
- c.Assert(req.Method, check.Equals, "GET")
- c.Assert(req.URL.Path, check.Equals, "/sample/")
- c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
- c.Assert(req.Form["prefix"], check.DeepEquals, []string{""})
- c.Assert(req.Form["delimiter"], check.DeepEquals, []string{"/"})
- c.Assert(req.Form["max-uploads"], check.DeepEquals, []string{"1000"})
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/responses_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/responses_test.go
deleted file mode 100644
index 66fe271b..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/responses_test.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package s3_test
-
-var PutCopyResultDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<CopyObjectResult>
-  <LastModified>2009-10-28T22:32:00</LastModified>
-  <ETag>"9b2cf535f27731c974343645a3985328"</ETag>
-</CopyObjectResult>
-`
-
-var GetObjectErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
-  <Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message>
-  <BucketName>non-existent-bucket</BucketName><RequestId>3F1B667FAD71C3D8</RequestId>
-  <HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId>
-</Error>
-`
-
-var GetListResultDump1 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
-  <Name>quotes</Name>
-  <Prefix>N</Prefix>
-  <IsTruncated>false</IsTruncated>
-  <Contents>
-    <Key>Nelson</Key>
-    <LastModified>2006-01-01T12:00:00.000Z</LastModified>
-    <ETag>"828ef3fdfa96f00ad9f27c383fc9ac7f"</ETag>
-    <Size>5</Size>
-    <StorageClass>STANDARD</StorageClass>
-    <Owner>
-      <ID>bcaf161ca5fb16fd081034f</ID>
-      <DisplayName>webfile</DisplayName>
-    </Owner>
-  </Contents>
-  <Contents>
-    <Key>Neo</Key>
-    <LastModified>2006-01-01T12:00:00.000Z</LastModified>
-    <ETag>"828ef3fdfa96f00ad9f27c383fc9ac7f"</ETag>
-    <Size>4</Size>
-    <StorageClass>STANDARD</StorageClass>
-    <Owner>
-      <ID>bcaf1ffd86a5fb16fd081034f</ID>
-      <DisplayName>webfile</DisplayName>
-    </Owner>
-  </Contents>
-</ListBucketResult>
-`
-
-var GetListResultDump2 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
-  <Name>example-bucket</Name>
-  <Prefix>photos/2006/</Prefix>
-  <Marker>some-marker</Marker>
-  <MaxKeys>1000</MaxKeys>
-  <Delimiter>/</Delimiter>
-  <IsTruncated>false</IsTruncated>
-  <CommonPrefixes>
-    <Prefix>photos/2006/feb/</Prefix>
-  </CommonPrefixes>
-  <CommonPrefixes>
-    <Prefix>photos/2006/jan/</Prefix>
-  </CommonPrefixes>
-</ListBucketResult>
-`
-
-var InitMultiResultDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>sample</Bucket>
-  <Key>multi</Key>
-  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
-</InitiateMultipartUploadResult>
-`
-
-var ListPartsResultDump1 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>sample</Bucket>
-  <Key>multi</Key>
-  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
-  <Initiator>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Initiator>
-  <Owner>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Owner>
-  <StorageClass>STANDARD</StorageClass>
-  <PartNumberMarker>0</PartNumberMarker>
-  <NextPartNumberMarker>2</NextPartNumberMarker>
-  <MaxParts>2</MaxParts>
-  <IsTruncated>true</IsTruncated>
-  <Part>
-    <PartNumber>1</PartNumber>
-    <LastModified>2013-01-30T13:45:51.000Z</LastModified>
-    <ETag>"ffc88b4ca90a355f8ddba6b2c3b2af5c"</ETag>
-    <Size>5</Size>
-  </Part>
-  <Part>
-    <PartNumber>2</PartNumber>
-    <LastModified>2013-01-30T13:45:52.000Z</LastModified>
-    <ETag>"d067a0fa9dc61a6e7195ca99696b5a89"</ETag>
-    <Size>5</Size>
-  </Part>
-</ListPartsResult>
-`
-
-var ListPartsResultDump2 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>sample</Bucket>
-  <Key>multi</Key>
-  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
-  <Initiator>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Initiator>
-  <Owner>
-    <ID>bb5c0f63b0b25f2d099c</ID>
-    <DisplayName>joe</DisplayName>
-  </Owner>
-  <StorageClass>STANDARD</StorageClass>
-  <PartNumberMarker>2</PartNumberMarker>
-  <NextPartNumberMarker>3</NextPartNumberMarker>
-  <MaxParts>2</MaxParts>
-  <IsTruncated>false</IsTruncated>
-  <Part>
-    <PartNumber>3</PartNumber>
-    <LastModified>2013-01-30T13:46:50.000Z</LastModified>
-    <ETag>"49dcd91231f801159e893fb5c6674985"</ETag>
-    <Size>5</Size>
-  </Part>
-</ListPartsResult>
-`
-
-var ListMultiResultDump = `
-<?xml version="1.0"?>
-<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
-  <NextKeyMarker>multi1</NextKeyMarker>
-  <NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
-  <Delimiter>/</Delimiter>
-  <MaxUploads>1000</MaxUploads>
-  <IsTruncated>false</IsTruncated>
-  <Upload>
-    <Key>multi1</Key>
-    <UploadId>iUVug89pPvSswrikD</UploadId>
-    <Initiator>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>gustavoniemeyer</DisplayName>
-    </Initiator>
-    <Owner>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>gustavoniemeyer</DisplayName>
-    </Owner>
-    <StorageClass>STANDARD</StorageClass>
-    <Initiated>2013-01-30T18:15:47.000Z</Initiated>
-  </Upload>
-  <Upload>
-    <Key>multi2</Key>
-    <UploadId>DkirwsSvPp98guVUi</UploadId>
-    <Initiator>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>joe</DisplayName>
-    </Initiator>
-    <Owner>
-      <ID>bb5c0f63b0b25f2d0</ID>
-      <DisplayName>joe</DisplayName>
-    </Owner>
-    <StorageClass>STANDARD</StorageClass>
-    <Initiated>2013-01-30T18:15:47.000Z</Initiated>
-  </Upload>
-  <CommonPrefixes>
-    <Prefix>a/</Prefix>
-  </CommonPrefixes>
-  <CommonPrefixes>
-    <Prefix>b/</Prefix>
-  </CommonPrefixes>
-</ListMultipartUploadsResult>
-`
-
-var NoSuchUploadErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
-  <Code>NoSuchUpload</Code>
-  <Message>Not relevant</Message>
-  <BucketName>sample</BucketName>
-  <RequestId>3F1B667FAD71C3D8</RequestId>
-  <HostId>kjhwqk</HostId>
-</Error>
-`
-
-var MultiCompleteDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>
-  <Bucket>Example-Bucket</Bucket>
-  <Key>Example-Object</Key>
-  <ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>
-</CompleteMultipartUploadResult>
-`
-
-var InternalErrorDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
-  <Code>InternalError</Code>
-  <Message>Not relevant</Message>
-  <BucketName>sample</BucketName>
-  <RequestId>3F1B667FAD71C3D8</RequestId>
-  <HostId>kjhwqk</HostId>
-</Error>
-`
-
-var GetServiceDump = `
-<?xml version="1.0" encoding="UTF-8"?>
-<ListAllMyBucketsResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
-  <Owner>
-    <ID>bcaf1ffd86f461ca5fb16fd081034f</ID>
-    <DisplayName>webfile</DisplayName>
-  </Owner>
-  <Buckets>
-    <Bucket>
-      <Name>quotes</Name>
-      <CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
-    </Bucket>
-    <Bucket>
-      <Name>samples</Name>
-      <CreationDate>2006-02-03T16:41:58.000Z</CreationDate>
-    </Bucket>
-  </Buckets>
-</ListAllMyBucketsResult>
-`
-
-var GetLocationUsStandard = `
-<?xml version="1.0" encoding="UTF-8"?>
-<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
-`
-
-var GetLocationUsWest1 = `
-<?xml version="1.0" encoding="UTF-8"?>
-<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">us-west-1</LocationConstraint>
-`
-
-var BucketWebsiteConfigurationDump = `<?xml version="1.0" encoding="UTF-8"?>
-<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><RedirectAllRequestsTo><HostName>example.com</HostName></RedirectAllRequestsTo></WebsiteConfiguration>`
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3.go
deleted file mode 100644
index dd313025..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3.go
+++ /dev/null
@@ -1,1293 +0,0 @@
-//
-// goamz - Go packages to interact with the Amazon Web Services.
-//
-// https://wiki.ubuntu.com/goamz
-//
-// Copyright (c) 2011 Canonical Ltd.
-//
-// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
-//
-
-package s3
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/md5"
- "crypto/sha1"
- "encoding/base64"
- "encoding/xml"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "path"
- "strconv"
- "strings"
- "time"
-
- "github.com/AdRoll/goamz/aws"
-)
-
-const debug = false
-
-// The S3 type encapsulates operations with an S3 region.
-type S3 struct {
- aws.Auth
- aws.Region
- ConnectTimeout time.Duration
- ReadTimeout time.Duration
- Signature int
- private byte // Reserve the right of using private data.
-}
-
-// The Bucket type encapsulates operations with an S3 bucket.
-type Bucket struct {
- *S3
- Name string
-}
-
-// The Owner type represents the owner of the object in an S3 bucket.
-type Owner struct {
- ID string
- DisplayName string
-}
-
-// Fold options into an Options struct
-//
-type Options struct {
- SSE bool
- SSECustomerAlgorithm string
- SSECustomerKey string
- SSECustomerKeyMD5 string
- Meta map[string][]string
- ContentEncoding string
- CacheControl string
- RedirectLocation string
- ContentMD5 string
- ContentDisposition string
- Range string
- StorageClass StorageClass
- // What else?
-}
-
-type CopyOptions struct {
- Options
- CopySourceOptions string
- MetadataDirective string
- ContentType string
-}
-
-// CopyObjectResult is the output from a Copy request
-type CopyObjectResult struct {
- ETag string
- LastModified string
-}
-
-var attempts = aws.AttemptStrategy{
- Min: 5,
- Total: 5 * time.Second,
- Delay: 200 * time.Millisecond,
-}
-
-// New creates a new S3.
-func New(auth aws.Auth, region aws.Region) *S3 {
- return &S3{auth, region, 0, 0, aws.V2Signature, 0}
-}
-
-// Bucket returns a Bucket with the given name.
-func (s3 *S3) Bucket(name string) *Bucket {
- if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
- name = strings.ToLower(name)
- }
- return &Bucket{s3, name}
-}
-
-type BucketInfo struct {
- Name string
- CreationDate string
-}
-
-type GetServiceResp struct {
- Owner Owner
- Buckets []BucketInfo `xml:">Bucket"`
-}
-
-// GetService gets a list of all buckets owned by an account.
-//
-// See http://goo.gl/wbHkGj for details.
-func (s3 *S3) GetService() (*GetServiceResp, error) {
- bucket := s3.Bucket("")
-
- r, err := bucket.Get("")
- if err != nil {
- return nil, err
- }
-
- // Parse the XML response.
- var resp GetServiceResp
- if err = xml.Unmarshal(r, &resp); err != nil {
- return nil, err
- }
-
- return &resp, nil
-}
-
-var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
-  <LocationConstraint>%s</LocationConstraint>
-</CreateBucketConfiguration>`
-
-// locationConstraint returns an io.Reader specifying a LocationConstraint if
-// required for the region.
-//
-// See http://goo.gl/bh9Kq for details.
-func (s3 *S3) locationConstraint() io.Reader {
- constraint := ""
- if s3.Region.S3LocationConstraint {
- constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
- }
- return strings.NewReader(constraint)
-}
-
-type ACL string
-
-const (
- Private = ACL("private")
- PublicRead = ACL("public-read")
- PublicReadWrite = ACL("public-read-write")
- AuthenticatedRead = ACL("authenticated-read")
- BucketOwnerRead = ACL("bucket-owner-read")
- BucketOwnerFull = ACL("bucket-owner-full-control")
-)
-
-type StorageClass string
-
-const (
- ReducedRedundancy = StorageClass("REDUCED_REDUNDANCY")
- StandardStorage = StorageClass("STANDARD")
-)
-
-// PutBucket creates a new bucket.
-//
-// See http://goo.gl/ndjnR for details.
-func (b *Bucket) PutBucket(perm ACL) error {
- headers := map[string][]string{
- "x-amz-acl": {string(perm)},
- }
- req := &request{
- method: "PUT",
- bucket: b.Name,
- path: "/",
- headers: headers,
- payload: b.locationConstraint(),
- }
- return b.S3.query(req, nil)
-}
-
-// DelBucket removes an existing S3 bucket. All objects in the bucket must
-// be removed before the bucket itself can be removed.
-//
-// See http://goo.gl/GoBrY for details.
-func (b *Bucket) DelBucket() (err error) {
- req := &request{
- method: "DELETE",
- bucket: b.Name,
- path: "/",
- }
- for attempt := attempts.Start(); attempt.Next(); {
- err = b.S3.query(req, nil)
- if !shouldRetry(err) {
- break
- }
- }
- return err
-}
-
-// Get retrieves an object from an S3 bucket.
-//
-// See http://goo.gl/isCO7 for details.
-func (b *Bucket) Get(path string) (data []byte, err error) {
- body, err := b.GetReader(path)
- if err != nil {
- return nil, err
- }
- data, err = ioutil.ReadAll(body)
- body.Close()
- return data, err
-}
-
-// GetReader retrieves an object from an S3 bucket,
-// returning the body of the HTTP response.
-// It is the caller's responsibility to call Close on rc when
-// finished reading.
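-//
-// A typical use might look like this (sketch; the calling code is
-// illustrative, not part of this package):
-//
-//	rc, err := bucket.GetReader("path/to/key")
-//	if err != nil {
-//		return err
-//	}
-//	defer rc.Close()
-//	data, err := ioutil.ReadAll(rc)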
-func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
- resp, err := b.GetResponse(path)
- if resp != nil {
- return resp.Body, err
- }
- return nil, err
-}
-
-// GetResponse retrieves an object from an S3 bucket,
-// returning the HTTP response.
-// It is the caller's responsibility to close the response body when
-// finished reading.
-func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
- return b.GetResponseWithHeaders(path, make(http.Header))
-}
-
-// GetResponseWithHeaders retrieves an object from an S3 bucket,
-// accepting custom headers to be sent as the second parameter and
-// returning the HTTP response.
-// It is the caller's responsibility to close the response body when
-// finished reading.
-func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) {
- req := &request{
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- err = b.S3.prepare(req)
- if err != nil {
- return nil, err
- }
- for attempt := attempts.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- return resp, nil
- }
- panic("unreachable")
-}
-
-// Exists checks whether or not an object exists on an S3 bucket using a HEAD request.
-func (b *Bucket) Exists(path string) (exists bool, err error) {
- req := &request{
- method: "HEAD",
- bucket: b.Name,
- path: path,
- }
- err = b.S3.prepare(req)
- if err != nil {
- return
- }
- for attempt := attempts.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
-
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
-
- if err != nil {
- // We can treat a 403 or 404 as non-existence.
- if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
- return false, nil
- }
- return false, err
- }
-
- if resp.StatusCode/100 == 2 {
- exists = true
- }
- if resp.Body != nil {
- resp.Body.Close()
- }
- return exists, err
- }
- return false, fmt.Errorf("S3 Currently Unreachable")
-}
-
-// Head HEADs an object in the S3 bucket, returning the response with
-// no body; see http://bit.ly/17K1ylI
-func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
- req := &request{
- method: "HEAD",
- bucket: b.Name,
- path: path,
- headers: headers,
- }
- err := b.S3.prepare(req)
- if err != nil {
- return nil, err
- }
-
- for attempt := attempts.Start(); attempt.Next(); {
- resp, err := b.S3.run(req, nil)
- if shouldRetry(err) && attempt.HasNext() {
- continue
- }
- if err != nil {
- return nil, err
- }
- return resp, err
- }
- return nil, fmt.Errorf("S3 Currently Unreachable")
-}
-
-// Put inserts an object into the S3 bucket.
-//
-// See http://goo.gl/FEBPD for details.
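-//
-// A minimal call might look like this (sketch; bucket, key, and body
-// are illustrative, written from the caller's side):
-//
-//	err := bucket.Put("hello.txt", []byte("hello"), "text/plain", s3.Private, s3.Options{})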
-func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { - body := bytes.NewBuffer(data) - return b.PutReader(path, body, int64(len(data)), contType, perm, options) -} - -// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key -func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { - headers := map[string][]string{ - "x-amz-acl": {string(perm)}, - "x-amz-copy-source": {url.QueryEscape(source)}, - } - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - } - resp := &CopyObjectResult{} - err := b.S3.query(req, resp) - if err != nil { - return resp, err - } - return resp, nil -} - -// PutReader inserts an object into the S3 bucket by consuming data -// from r until EOF. -func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - "Content-Type": {contType}, - "x-amz-acl": {string(perm)}, - } - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.S3.query(req, nil) -} - -// addHeaders adds o's specified fields to headers -func (o Options) addHeaders(headers map[string][]string) { - if o.SSE { - headers["x-amz-server-side-encryption"] = []string{"AES256"} - } else if len(o.SSECustomerAlgorithm) != 0 && len(o.SSECustomerKey) != 0 && len(o.SSECustomerKeyMD5) != 0 { - // Amazon-managed keys and customer-managed keys are mutually exclusive - headers["x-amz-server-side-encryption-customer-algorithm"] = []string{o.SSECustomerAlgorithm} - headers["x-amz-server-side-encryption-customer-key"] = []string{o.SSECustomerKey} - headers["x-amz-server-side-encryption-customer-key-MD5"] = []string{o.SSECustomerKeyMD5} - } - if len(o.Range) != 0 { - headers["Range"] = []string{o.Range} - } - if len(o.ContentEncoding) != 0 { - headers["Content-Encoding"] = []string{o.ContentEncoding} - } - if len(o.CacheControl) != 0 { - headers["Cache-Control"] = []string{o.CacheControl} - } - if len(o.ContentMD5) != 0 { - headers["Content-MD5"] = []string{o.ContentMD5} - } - if len(o.RedirectLocation) != 0 { - headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation} - } - if len(o.ContentDisposition) != 0 { - headers["Content-Disposition"] = []string{o.ContentDisposition} - } - if len(o.StorageClass) != 0 { - headers["x-amz-storage-class"] = []string{string(o.StorageClass)} - - } - for k, v := range o.Meta { - headers["x-amz-meta-"+k] = v - } -} - -// addHeaders adds o's specified fields to headers -func (o CopyOptions) addHeaders(headers map[string][]string) { - o.Options.addHeaders(headers) - if len(o.MetadataDirective) != 0 { - headers["x-amz-metadata-directive"] = []string{o.MetadataDirective} - } - if len(o.CopySourceOptions) != 0 { - headers["x-amz-copy-source-range"] = []string{o.CopySourceOptions} - } - if len(o.ContentType) != 0 { - headers["Content-Type"] = []string{o.ContentType} - } -} - -func makeXmlBuffer(doc []byte) *bytes.Buffer { - buf := new(bytes.Buffer) - buf.WriteString(xml.Header) - buf.Write(doc) - return buf -} - -type IndexDocument struct { - Suffix string `xml:"Suffix"` -} - -type ErrorDocument struct { - Key string `xml:"Key"` -} - -type RoutingRule struct { - ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` - 
RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` - RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` -} - -type RedirectAllRequestsTo struct { - HostName string `xml:"HostName"` - Protocol string `xml:"Protocol,omitempty"` -} - -type WebsiteConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"` - IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` - ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` - RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` - RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` -} - -// PutBucketWebsite configures a bucket as a website. -// -// See http://goo.gl/TpRlUy for details. -func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { - doc, err := xml.Marshal(configuration) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - - return b.PutBucketSubresource("website", buf, int64(buf.Len())) -} - -func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - } - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: r, - params: url.Values{subresource: {""}}, - } - - return b.S3.query(req, nil) -} - -// Del removes an object from the S3 bucket. -// -// See http://goo.gl/APeTt for details. -func (b *Bucket) Del(path string) error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: path, - } - return b.S3.query(req, nil) -} - -type Delete struct { - Quiet bool `xml:"Quiet,omitempty"` - Objects []Object `xml:"Object"` -} - -type Object struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` -} - -// DelMulti removes up to 1000 objects from the S3 bucket. -// -// See http://goo.gl/jx6cWK for details. -func (b *Bucket) DelMulti(objects Delete) error { - doc, err := xml.Marshal(objects) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(int64(size), 10)}, - "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, - "Content-Type": {"text/xml"}, - } - req := &request{ - path: "/", - method: "POST", - params: url.Values{"delete": {""}}, - bucket: b.Name, - headers: headers, - payload: buf, - } - - return b.S3.query(req, nil) -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` - // if IsTruncated is true, pass NextMarker as marker argument to List() - // to get the next set of keys - NextMarker string -} - -// The Key type represents an item stored in an S3 bucket. -type Key struct { - Key string - LastModified string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in an S3 bucket. 
-// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. Amazon S3 lists objects in alphabetical order and -// will return keys alphabetically greater than the marker. -// -// The max parameter specifies how many keys + common prefixes to return in -// the response. The default is 1000. -// -// For example, given these keys in a bucket: -// -// index.html -// index2.html -// photos/2006/January/sample.jpg -// photos/2006/February/sample2.jpg -// photos/2006/February/sample3.jpg -// photos/2006/February/sample4.jpg -// -// Listing this bucket with delimiter set to "/" would yield the -// following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Contents: []Key{ -// {Key: "index.html", "index2.html"}, -// }, -// CommonPrefixes: []string{ -// "photos/", -// }, -// } -// -// Listing the same bucket with delimiter set to "/" and prefix set to -// "photos/2006/" would yield the following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Prefix: "photos/2006/", -// CommonPrefixes: []string{ -// "photos/2006/February/", -// "photos/2006/January/", -// }, -// } -// -// See http://goo.gl/YjQTc for details. -func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { - params := map[string][]string{ - "prefix": {prefix}, - "delimiter": {delim}, - "marker": {marker}, - } - if max != 0 { - params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} - } - req := &request{ - bucket: b.Name, - params: params, - } - result = &ListResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - // if NextMarker is not returned, it should be set to the name of last key, - // so let's do it so that each caller doesn't have to - if result.IsTruncated && result.NextMarker == "" { - n := len(result.Contents) - if n > 0 { - result.NextMarker = result.Contents[n-1].Key - } - } - return result, nil -} - -// The VersionsResp type holds the results of a list bucket Versions operation. -type VersionsResp struct { - Name string - Prefix string - KeyMarker string - VersionIdMarker string - MaxKeys int - Delimiter string - IsTruncated bool - Versions []Version `xml:"Version"` - CommonPrefixes []string `xml:">Prefix"` -} - -// The Version type represents an object version stored in an S3 bucket. -type Version struct { - Key string - VersionId string - IsLatest bool - LastModified string - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. 
- ETag string
- Size int64
- Owner Owner
- StorageClass string
-}
-
-func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
- params := map[string][]string{
- "versions": {""},
- "prefix": {prefix},
- "delimiter": {delim},
- }
-
- if len(versionIdMarker) != 0 {
- params["version-id-marker"] = []string{versionIdMarker}
- }
- if len(keyMarker) != 0 {
- params["key-marker"] = []string{keyMarker}
- }
-
- if max != 0 {
- params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
- }
- req := &request{
- bucket: b.Name,
- params: params,
- }
- result = &VersionsResp{}
- for attempt := attempts.Start(); attempt.Next(); {
- err = b.S3.query(req, result)
- if !shouldRetry(err) {
- break
- }
- }
- if err != nil {
- return nil, err
- }
- return result, nil
-}
-
-type GetLocationResp struct {
- Location string `xml:",innerxml"`
-}
-
-func (b *Bucket) Location() (string, error) {
- r, err := b.Get("/?location")
- if err != nil {
- return "", err
- }
-
- // Parse the XML response.
- var resp GetLocationResp
- if err = xml.Unmarshal(r, &resp); err != nil {
- return "", err
- }
-
- if resp.Location == "" {
- return "us-east-1", nil
- } else {
- return resp.Location, nil
- }
-}
-
-// URL returns a non-signed URL that allows retrieving the
-// object at path. It only works if the object is publicly
-// readable (see SignedURL).
-func (b *Bucket) URL(path string) string {
- req := &request{
- bucket: b.Name,
- path: path,
- }
- err := b.S3.prepare(req)
- if err != nil {
- panic(err)
- }
- u, err := req.url()
- if err != nil {
- panic(err)
- }
- u.RawQuery = ""
- return u.String()
-}
-
-// SignedURL returns a signed URL that allows anyone holding the URL
-// to retrieve the object at path. The signature is valid until expires.
-func (b *Bucket) SignedURL(path string, expires time.Time) string {
- return b.SignedURLWithArgs(path, expires, nil, nil)
-}
-
-// SignedURLWithArgs returns a signed URL that allows anyone holding the URL
-// to retrieve the object at path. The signature is valid until expires.
-func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string {
- return b.SignedURLWithMethod("GET", path, expires, params, headers)
-}
-
-// SignedURLWithMethod returns a signed URL that allows anyone holding the URL
-// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires.
-func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string {
- var uv = url.Values{}
-
- if params != nil {
- uv = params
- }
-
- if b.S3.Signature == aws.V2Signature {
- uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10))
- } else {
- uv.Set("X-Amz-Expires", strconv.FormatInt(expires.Unix()-time.Now().Unix(), 10))
- }
-
- req := &request{
- method: method,
- bucket: b.Name,
- path: path,
- params: uv,
- headers: headers,
- }
- err := b.S3.prepare(req)
- if err != nil {
- panic(err)
- }
- u, err := req.url()
- if err != nil {
- panic(err)
- }
- if b.S3.Auth.Token() != "" && b.S3.Signature == aws.V2Signature {
- return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0])
- } else {
- return u.String()
- }
-}
-
-// UploadSignedURL returns a signed URL that allows anyone holding the URL
-// to upload the object at path. The signature is valid until expires.
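-// (The signature below is a V2-style HMAC-SHA1 over a string of the form
-// "method\n\ncontent-type\nexpires\n[token]/bucket/name".)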
-// contenttype is a string like image/png
-// name is the resource name in s3 terminology like images/ali.png [obviously excluding the bucket name itself]
-func (b *Bucket) UploadSignedURL(name, method, content_type string, expires time.Time) string {
- expire_date := expires.Unix()
- if method != "POST" {
- method = "PUT"
- }
-
- a := b.S3.Auth
- tokenData := ""
-
- if a.Token() != "" {
- tokenData = "x-amz-security-token:" + a.Token() + "\n"
- }
-
- stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name)
- secretKey := a.SecretKey
- accessId := a.AccessKey
- mac := hmac.New(sha1.New, []byte(secretKey))
- mac.Write([]byte(stringToSign))
- macsum := mac.Sum(nil)
- signature := base64.StdEncoding.EncodeToString(macsum)
- signature = strings.TrimSpace(signature)
-
- var signedurl *url.URL
- var err error
- if b.Region.S3Endpoint != "" {
- signedurl, err = url.Parse(b.Region.S3Endpoint)
- name = b.Name + "/" + name
- } else {
- signedurl, err = url.Parse("https://" + b.Name + ".s3.amazonaws.com/")
- }
-
- if err != nil {
- log.Println("ERROR signing URL for S3 upload", err)
- return ""
- }
- signedurl.Path = name
- params := url.Values{}
- params.Add("AWSAccessKeyId", accessId)
- params.Add("Expires", strconv.FormatInt(expire_date, 10))
- params.Add("Signature", signature)
- if a.Token() != "" {
- params.Add("x-amz-security-token", a.Token())
- }
-
- signedurl.RawQuery = params.Encode()
- return signedurl.String()
-}
-
-// PostFormArgsEx returns the action and input fields needed to allow anonymous
-// uploads to a bucket within the expiration limit.
-// Additional conditions can be specified with conds.
-func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) {
- conditions := make([]string, 0)
- fields = map[string]string{
- "AWSAccessKeyId": b.Auth.AccessKey,
- "key": path,
- }
-
- if conds != nil {
- conditions = append(conditions, conds...)
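- // (Caller-supplied conditions are folded into the same policy
- // document as the key/bucket/redirect conditions added below.)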
- }
-
- conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
- conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
- if redirect != "" {
- conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
- fields["success_action_redirect"] = redirect
- }
-
- vExpiration := expires.Format("2006-01-02T15:04:05Z")
- vConditions := strings.Join(conditions, ",")
- policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
- policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
- fields["policy"] = policy64
-
- signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey))
- signer.Write([]byte(policy64))
- fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
-
- action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name)
- return
-}
-
-// PostFormArgs returns the action and input fields needed to allow anonymous
-// uploads to a bucket within the expiration limit
-func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
- return b.PostFormArgsEx(path, expires, redirect, nil)
-}
-
-type request struct {
- method string
- bucket string
- path string
- params url.Values
- headers http.Header
- baseurl string
- payload io.Reader
- prepared bool
-}
-
-func (req *request) url() (*url.URL, error) {
- u, err := url.Parse(req.baseurl)
- if err != nil {
- return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
- }
- u.RawQuery = req.params.Encode()
- u.Path = req.path
- return u, nil
-}
-
-// query prepares and runs the req request.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) query(req *request, resp interface{}) error {
- err := s3.prepare(req)
- if err != nil {
- return err
- }
- r, err := s3.run(req, resp)
- if r != nil && r.Body != nil {
- r.Body.Close()
- }
- return err
-}
-
-// queryV4Sign prepares and runs the req request, signed with aws v4 signatures.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) queryV4Sign(req *request, resp interface{}) error {
- if req.headers == nil {
- req.headers = map[string][]string{}
- }
-
- err := s3.setBaseURL(req)
- if err != nil {
- return err
- }
-
- hreq, err := s3.setupHttpRequest(req)
- if err != nil {
- return err
- }
-
- // req.Host must be set for V4 signature calculation
- hreq.Host = hreq.URL.Host
-
- signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region)
- signer.IncludeXAmzContentSha256 = true
- signer.Sign(hreq)
-
- _, err = s3.doHttpRequest(hreq, resp)
- return err
-}
-
-// Sets baseurl on req from bucket name and the region endpoint
-func (s3 *S3) setBaseURL(req *request) error {
- if req.bucket == "" {
- req.baseurl = s3.Region.S3Endpoint
- } else {
- req.baseurl = s3.Region.S3BucketEndpoint
- if req.baseurl == "" {
- // Use the path method to address the bucket.
- req.baseurl = s3.Region.S3Endpoint
- req.path = "/" + req.bucket + req.path
- } else {
- // Just in case, prevent injection.
- if strings.IndexAny(req.bucket, "/:@") >= 0 {
- return fmt.Errorf("bad S3 bucket: %q", req.bucket)
- }
- req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
- }
- }
-
- return nil
-}
-
-// partiallyEscapedPath partially escapes the S3 path allowing for all S3 REST API calls.
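-//
-// For example, a subresource request like "/bucket/?acl" has to keep its
-// literal '?'; full escaping would produce "/bucket/%3Facl" and change
-// the meaning of the request.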
-//
-// Some commands, including:
-// GET Bucket acl http://goo.gl/aoXflF
-// GET Bucket cors http://goo.gl/UlmBdx
-// GET Bucket lifecycle http://goo.gl/8Fme7M
-// GET Bucket policy http://goo.gl/ClXIo3
-// GET Bucket location http://goo.gl/5lh8RD
-// GET Bucket Logging http://goo.gl/sZ5ckF
-// GET Bucket notification http://goo.gl/qSSZKD
-// GET Bucket tagging http://goo.gl/QRvxnM
-// require the first character after the bucket name in the path to be a literal '?' and
-// not the escaped hex representation '%3F'.
-func partiallyEscapedPath(path string) string {
- pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/")
- if len(pathEscapedAndSplit) >= 3 {
- if len(pathEscapedAndSplit[2]) >= 3 {
- // Check for the one "?" that should not be escaped.
- if pathEscapedAndSplit[2][0:3] == "%3F" {
- pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:]
- }
- }
- }
- return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1)
-}
-
-// prepare sets up req to be delivered to S3.
-func (s3 *S3) prepare(req *request) error {
- // Copy so they can be mutated without affecting retries.
- params := make(url.Values)
- headers := make(http.Header)
- for k, v := range req.params {
- params[k] = v
- }
- for k, v := range req.headers {
- headers[k] = v
- }
- req.params = params
- req.headers = headers
-
- if !req.prepared {
- req.prepared = true
- if req.method == "" {
- req.method = "GET"
- }
-
- if !strings.HasPrefix(req.path, "/") {
- req.path = "/" + req.path
- }
-
- err := s3.setBaseURL(req)
- if err != nil {
- return err
- }
- }
-
- if s3.Signature == aws.V2Signature && s3.Auth.Token() != "" {
- req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()}
- } else if s3.Auth.Token() != "" {
- req.params.Set("X-Amz-Security-Token", s3.Auth.Token())
- }
-
- if s3.Signature == aws.V2Signature {
- // Always sign again as it's not clear how far the
- // server has handled a previous attempt.
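- // (V2 signing mutates req.headers, so the fresh copies made at the
- // top of prepare keep retries from reusing stale signed values.)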
- u, err := url.Parse(req.baseurl)
- if err != nil {
- return err
- }
-
- signpathPartiallyEscaped := partiallyEscapedPath(req.path)
- req.headers["Host"] = []string{u.Host}
- req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
-
- sign(s3.Auth, req.method, signpathPartiallyEscaped, req.params, req.headers)
- } else {
- hreq, err := s3.setupHttpRequest(req)
- if err != nil {
- return err
- }
-
- hreq.Host = hreq.URL.Host
- signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region)
- signer.IncludeXAmzContentSha256 = true
- signer.Sign(hreq)
-
- req.payload = hreq.Body
- if _, ok := headers["Content-Length"]; ok {
- req.headers["Content-Length"] = headers["Content-Length"]
- }
- }
- return nil
-}
-
-// Prepares an *http.Request for doHttpRequest
-func (s3 *S3) setupHttpRequest(req *request) (*http.Request, error) {
- // Copy so that signing the http request will not mutate it
- headers := make(http.Header)
- for k, v := range req.headers {
- headers[k] = v
- }
- req.headers = headers
-
- u, err := req.url()
- if err != nil {
- return nil, err
- }
- u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path))
-
- hreq := http.Request{
- URL: u,
- Method: req.method,
- ProtoMajor: 1,
- ProtoMinor: 1,
- Close: true,
- Header: req.headers,
- Form: req.params,
- }
-
- if v, ok := req.headers["Content-Length"]; ok {
- hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
- delete(req.headers, "Content-Length")
- }
- if req.payload != nil {
- hreq.Body = ioutil.NopCloser(req.payload)
- }
-
- return &hreq, nil
-}
-
-// doHttpRequest sends hreq and returns the http response from the server.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) {
- c := http.Client{
- Transport: &http.Transport{
- Dial: func(netw, addr string) (c net.Conn, err error) {
- deadline := time.Now().Add(s3.ReadTimeout)
- if s3.ConnectTimeout > 0 {
- c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout)
- } else {
- c, err = net.Dial(netw, addr)
- }
- if err != nil {
- return
- }
- if s3.ReadTimeout > 0 {
- err = c.SetDeadline(deadline)
- }
- return
- },
- Proxy: http.ProxyFromEnvironment,
- },
- }
-
- hresp, err := c.Do(hreq)
- if err != nil {
- return nil, err
- }
- if debug {
- dump, _ := httputil.DumpResponse(hresp, true)
- log.Printf("} -> %s\n", dump)
- }
- if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
- return nil, buildError(hresp)
- }
- if resp != nil {
- err = xml.NewDecoder(hresp.Body).Decode(resp)
- hresp.Body.Close()
-
- if debug {
- log.Printf("goamz.s3> decoded xml into %#v", resp)
- }
-
- }
- return hresp, err
-}
-
-// run sends req and returns the http response from the server.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
- if debug {
- log.Printf("Running S3 request: %#v", req)
- }
-
- hreq, err := s3.setupHttpRequest(req)
- if err != nil {
- return nil, err
- }
-
- return s3.doHttpRequest(hreq, resp)
-}
-
-// Error represents an error in an operation with S3.
-type Error struct {
- StatusCode int // HTTP status code (200, 403, ...)
- Code string // S3 error code ("UnsupportedOperation", ...)
- Message string // The human-oriented error message
- BucketName string
- RequestId string
- HostId string
-}
-
-func (e *Error) Error() string {
- return e.Message
-}
-
-func buildError(r *http.Response) error {
- if debug {
- log.Printf("got error (status code %v)", r.StatusCode)
- data, err := ioutil.ReadAll(r.Body)
- if err != nil {
- log.Printf("\tread error: %v", err)
- } else {
- log.Printf("\tdata:\n%s\n\n", data)
- }
- r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
- }
-
- err := Error{}
- // TODO return error if Unmarshal fails?
- xml.NewDecoder(r.Body).Decode(&err)
- r.Body.Close()
- err.StatusCode = r.StatusCode
- if err.Message == "" {
- err.Message = r.Status
- }
- if debug {
- log.Printf("err: %#v\n", err)
- }
- return &err
-}
-
-func shouldRetry(err error) bool {
- if err == nil {
- return false
- }
- switch err {
- case io.ErrUnexpectedEOF, io.EOF:
- return true
- }
- switch e := err.(type) {
- case *net.DNSError:
- return true
- case *net.OpError:
- switch e.Op {
- case "dial", "read", "write":
- return true
- }
- case *url.Error:
- // url.Error can be returned either by net/url if a URL cannot be
- // parsed, or by net/http if the response is closed before the headers
- // are received or parsed correctly. In that latter case, e.Op is set to
- // the HTTP method name with the first letter uppercased. We don't want
- // to retry on POST operations, since those are not idempotent; all the
- // other ones should be safe to retry. The only case where all
- // operations are safe to retry is "dial" errors, since in that case
- // the POST request didn't make it to the server.
-
- if netErr, ok := e.Err.(*net.OpError); ok && netErr.Op == "dial" {
- return true
- }
-
- switch e.Op {
- case "Get", "Put", "Delete", "Head":
- return shouldRetry(e.Err)
- default:
- return false
- }
- case *Error:
- switch e.Code {
- case "InternalError", "NoSuchUpload", "NoSuchBucket":
- return true
- }
- switch e.StatusCode {
- case 500, 503, 504:
- return true
- }
- }
- return false
-}
-
-func hasCode(err error, code string) bool {
- s3err, ok := err.(*Error)
- return ok && s3err.Code == code
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3_test.go
deleted file mode 100644
index 161bb3af..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3_test.go
+++ /dev/null
@@ -1,502 +0,0 @@
-package s3_test
-
-import (
- "bytes"
- "io/ioutil"
- "net/http"
- "testing"
- "time"
-
- "github.com/AdRoll/goamz/aws"
- "github.com/AdRoll/goamz/s3"
- "github.com/AdRoll/goamz/testutil"
- "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) {
- check.TestingT(t)
-}
-
-type S struct {
- s3 *s3.S3
-}
-
-var _ = check.Suite(&S{})
-
-var testServer = testutil.NewHTTPServer()
-
-func (s *S) SetUpSuite(c *check.C) {
- testServer.Start()
- auth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
- s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
-}
-
-func (s *S) TearDownSuite(c *check.C) {
- s3.SetAttemptStrategy(nil)
-}
-
-func (s *S) SetUpTest(c *check.C) {
- attempts := aws.AttemptStrategy{
- Total: 300 * time.Millisecond,
- Delay: 100 * time.Millisecond,
- }
- s3.SetAttemptStrategy(&attempts)
-}
-
-func (s *S) TearDownTest(c *check.C) {
- testServer.Flush()
-}
-
-func (s *S) DisableRetries() {
- s3.SetAttemptStrategy(&aws.AttemptStrategy{})
-}
-
-// PutBucket docs: http://goo.gl/kBTCu
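-
-// The tests below share one pattern: queue a canned response on the fake
-// server, invoke the client call under test, then assert on the request
-// that the server recorded.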
- -func (s *S) TestPutBucket(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.PutBucket(s3.Private) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/bucket/") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") -} - -// PutBucketWebsite docs: http://goo.gl/TpRlUy - -func (s *S) TestPutBucketWebsite(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - config := s3.WebsiteConfiguration{ - RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{HostName: "example.com"}, - } - err := b.PutBucketWebsite(config) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - body, err := ioutil.ReadAll(req.Body) - req.Body.Close() - c.Assert(err, check.IsNil) - c.Assert(string(body), check.Equals, BucketWebsiteConfigurationDump) - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/bucket/") - c.Assert(req.URL.RawQuery, check.Equals, "website=") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") -} - -// Head docs: http://bit.ly/17K1ylI - -func (s *S) TestHead(c *check.C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - resp, err := b.Head("name", nil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "HEAD") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") - - c.Assert(err, check.IsNil) - c.Assert(resp.ContentLength, check.FitsTypeOf, int64(0)) - c.Assert(resp, check.FitsTypeOf, &http.Response{}) -} - -// DeleteBucket docs: http://goo.gl/GoBrY - -func (s *S) TestDelBucket(c *check.C) { - testServer.Response(204, nil, "") - - b := s.s3.Bucket("bucket") - err := b.DelBucket() - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "DELETE") - c.Assert(req.URL.Path, check.Equals, "/bucket/") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") -} - -// GetObject docs: http://goo.gl/isCO7 - -func (s *S) TestGet(c *check.C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - data, err := b.Get("name") - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") - - c.Assert(err, check.IsNil) - c.Assert(string(data), check.Equals, "content") -} - -func (s *S) TestGetWithPlus(c *check.C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - _, err := b.Get("has+plus") - - req := testServer.WaitRequest() - c.Assert(err, check.IsNil) - c.Assert(req.RequestURI, check.Equals, "http://localhost:4444/bucket/has%2Bplus") -} - -func (s *S) TestURL(c *check.C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - url := b.URL("name") - r, err := http.Get(url) - c.Assert(err, check.IsNil) - data, err := ioutil.ReadAll(r.Body) - r.Body.Close() - c.Assert(err, check.IsNil) - c.Assert(string(data), check.Equals, "content") - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") -} - -func (s *S) TestGetReader(c *check.C) { - testServer.Response(200, nil, "content") - - b := s.s3.Bucket("bucket") - rc, err := b.GetReader("name") - c.Assert(err, check.IsNil) - data, err := ioutil.ReadAll(rc) - rc.Close() - c.Assert(err, check.IsNil) - 
c.Assert(string(data), check.Equals, "content") - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") -} - -func (s *S) TestGetNotFound(c *check.C) { - for i := 0; i < 10; i++ { - testServer.Response(404, nil, GetObjectErrorDump) - } - - b := s.s3.Bucket("non-existent-bucket") - data, err := b.Get("non-existent") - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/non-existent-bucket/non-existent") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") - - s3err, _ := err.(*s3.Error) - c.Assert(s3err, check.NotNil) - c.Assert(s3err.StatusCode, check.Equals, 404) - c.Assert(s3err.BucketName, check.Equals, "non-existent-bucket") - c.Assert(s3err.RequestId, check.Equals, "3F1B667FAD71C3D8") - c.Assert(s3err.HostId, check.Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D") - c.Assert(s3err.Code, check.Equals, "NoSuchBucket") - c.Assert(s3err.Message, check.Equals, "The specified bucket does not exist") - c.Assert(s3err.Error(), check.Equals, "The specified bucket does not exist") - c.Assert(data, check.IsNil) -} - -// PutObject docs: http://goo.gl/FEBPD - -func (s *S) TestPutObject(c *check.C) { - testServer.Response(200, nil, "") - const DISPOSITION = "attachment; filename=\"0x1a2b3c.jpg\"" - - b := s.s3.Bucket("bucket") - err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{ContentDisposition: DISPOSITION}) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"}) - c.Assert(req.Header["Content-Disposition"], check.DeepEquals, []string{DISPOSITION}) - //c.Assert(req.Header["Content-MD5"], gocheck.DeepEquals, "...") - c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"}) -} - -func (s *S) TestPutObjectReducedRedundancy(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{StorageClass: s3.ReducedRedundancy}) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"}) - c.Assert(req.Header["X-Amz-Storage-Class"], check.DeepEquals, []string{"REDUCED_REDUNDANCY"}) -} - -// PutCopy docs: http://goo.gl/mhEHtA -func (s *S) TestPutCopy(c *check.C) { - testServer.Response(200, nil, PutCopyResultDump) - - b := s.s3.Bucket("bucket") - res, err := b.PutCopy("name", s3.Private, s3.CopyOptions{}, - // 0xFC is ü - 0xE9 is é - "source-bucket/\u00FCber-fil\u00E9.jpg") - c.Assert(err, check.IsNil) - c.Assert(res, check.DeepEquals, &s3.CopyObjectResult{ - ETag: `"9b2cf535f27731c974343645a3985328"`, - LastModified: `2009-10-28T22:32:00`}) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, 
"/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"0"}) - c.Assert(req.Header["X-Amz-Copy-Source"], check.DeepEquals, []string{`source-bucket%2F%C3%BCber-fil%C3%A9.jpg`}) - c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"}) -} - -func (s *S) TestPutObjectReadTimeout(c *check.C) { - s.s3.ReadTimeout = 50 * time.Millisecond - defer func() { - s.s3.ReadTimeout = 0 - }() - - b := s.s3.Bucket("bucket") - err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{}) - - // Make sure that we get a timeout error. - c.Assert(err, check.NotNil) - - // Set the response after the request times out so that the next request will work. - testServer.Response(200, nil, "") - - // This time set the response within our timeout period so that we expect the call - // to return successfully. - go func() { - time.Sleep(25 * time.Millisecond) - testServer.Response(200, nil, "") - }() - err = b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) -} - -func (s *S) TestPutReader(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - buf := bytes.NewBufferString("content") - err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "PUT") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""}) - c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"}) - c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"}) - //c.Assert(req.Header["Content-MD5"], gocheck.Equals, "...") - c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"}) -} - -// DelObject docs: http://goo.gl/APeTt - -func (s *S) TestDelObject(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - err := b.Del("name") - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "DELETE") - c.Assert(req.URL.Path, check.Equals, "/bucket/name") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") -} - -func (s *S) TestDelMultiObjects(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - objects := []s3.Object{s3.Object{Key: "test"}} - err := b.DelMulti(s3.Delete{ - Quiet: false, - Objects: objects, - }) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "POST") - c.Assert(req.URL.RawQuery, check.Equals, "delete=") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") - c.Assert(req.Header["Content-MD5"], check.Not(check.Equals), "") - c.Assert(req.Header["Content-Type"], check.Not(check.Equals), "") - c.Assert(req.ContentLength, check.Not(check.Equals), "") -} - -// Bucket List Objects docs: http://goo.gl/YjQTc - -func (s *S) TestList(c *check.C) { - testServer.Response(200, nil, GetListResultDump1) - - b := s.s3.Bucket("quotes") - - data, err := b.List("N", "", "", 0) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/quotes/") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") - c.Assert(req.Form["prefix"], check.DeepEquals, []string{"N"}) - c.Assert(req.Form["delimiter"], check.DeepEquals, []string{""}) - 
c.Assert(req.Form["marker"], check.DeepEquals, []string{""}) - c.Assert(req.Form["max-keys"], check.DeepEquals, []string(nil)) - - c.Assert(data.Name, check.Equals, "quotes") - c.Assert(data.Prefix, check.Equals, "N") - c.Assert(data.IsTruncated, check.Equals, false) - c.Assert(len(data.Contents), check.Equals, 2) - - c.Assert(data.Contents[0].Key, check.Equals, "Nelson") - c.Assert(data.Contents[0].LastModified, check.Equals, "2006-01-01T12:00:00.000Z") - c.Assert(data.Contents[0].ETag, check.Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) - c.Assert(data.Contents[0].Size, check.Equals, int64(5)) - c.Assert(data.Contents[0].StorageClass, check.Equals, "STANDARD") - c.Assert(data.Contents[0].Owner.ID, check.Equals, "bcaf161ca5fb16fd081034f") - c.Assert(data.Contents[0].Owner.DisplayName, check.Equals, "webfile") - - c.Assert(data.Contents[1].Key, check.Equals, "Neo") - c.Assert(data.Contents[1].LastModified, check.Equals, "2006-01-01T12:00:00.000Z") - c.Assert(data.Contents[1].ETag, check.Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`) - c.Assert(data.Contents[1].Size, check.Equals, int64(4)) - c.Assert(data.Contents[1].StorageClass, check.Equals, "STANDARD") - c.Assert(data.Contents[1].Owner.ID, check.Equals, "bcaf1ffd86a5fb16fd081034f") - c.Assert(data.Contents[1].Owner.DisplayName, check.Equals, "webfile") -} - -func (s *S) TestListWithDelimiter(c *check.C) { - testServer.Response(200, nil, GetListResultDump2) - - b := s.s3.Bucket("quotes") - - data, err := b.List("photos/2006/", "/", "some-marker", 1000) - c.Assert(err, check.IsNil) - - req := testServer.WaitRequest() - c.Assert(req.Method, check.Equals, "GET") - c.Assert(req.URL.Path, check.Equals, "/quotes/") - c.Assert(req.Header["Date"], check.Not(check.Equals), "") - c.Assert(req.Form["prefix"], check.DeepEquals, []string{"photos/2006/"}) - c.Assert(req.Form["delimiter"], check.DeepEquals, []string{"/"}) - c.Assert(req.Form["marker"], check.DeepEquals, []string{"some-marker"}) - c.Assert(req.Form["max-keys"], check.DeepEquals, []string{"1000"}) - - c.Assert(data.Name, check.Equals, "example-bucket") - c.Assert(data.Prefix, check.Equals, "photos/2006/") - c.Assert(data.Delimiter, check.Equals, "/") - c.Assert(data.Marker, check.Equals, "some-marker") - c.Assert(data.IsTruncated, check.Equals, false) - c.Assert(len(data.Contents), check.Equals, 0) - c.Assert(data.CommonPrefixes, check.DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"}) -} - -func (s *S) TestExists(c *check.C) { - testServer.Response(200, nil, "") - - b := s.s3.Bucket("bucket") - result, err := b.Exists("name") - - req := testServer.WaitRequest() - - c.Assert(req.Method, check.Equals, "HEAD") - - c.Assert(err, check.IsNil) - c.Assert(result, check.Equals, true) -} - -func (s *S) TestExistsNotFound404(c *check.C) { - testServer.Response(404, nil, "") - - b := s.s3.Bucket("bucket") - result, err := b.Exists("name") - - req := testServer.WaitRequest() - - c.Assert(req.Method, check.Equals, "HEAD") - - c.Assert(err, check.IsNil) - c.Assert(result, check.Equals, false) -} - -func (s *S) TestExistsNotFound403(c *check.C) { - testServer.Response(403, nil, "") - - b := s.s3.Bucket("bucket") - result, err := b.Exists("name") - - req := testServer.WaitRequest() - - c.Assert(req.Method, check.Equals, "HEAD") - - c.Assert(err, check.IsNil) - c.Assert(result, check.Equals, false) -} - -func (s *S) TestGetService(c *check.C) { - testServer.Response(200, nil, GetServiceDump) - - expected := s3.GetServiceResp{ - Owner: s3.Owner{ - ID: "bcaf1ffd86f461ca5fb16fd081034f", - 
DisplayName: "webfile", - }, - Buckets: []s3.BucketInfo{ - s3.BucketInfo{ - Name: "quotes", - CreationDate: "2006-02-03T16:45:09.000Z", - }, - s3.BucketInfo{ - Name: "samples", - CreationDate: "2006-02-03T16:41:58.000Z", - }, - }, - } - - received, err := s.s3.GetService() - - c.Assert(err, check.IsNil) - c.Assert(*received, check.DeepEquals, expected) -} - -func (s *S) TestLocation(c *check.C) { - testServer.Response(200, nil, GetLocationUsStandard) - expectedUsStandard := "us-east-1" - - bucketUsStandard := s.s3.Bucket("us-east-1") - resultUsStandard, err := bucketUsStandard.Location() - - c.Assert(err, check.IsNil) - c.Assert(resultUsStandard, check.Equals, expectedUsStandard) - - testServer.Response(200, nil, GetLocationUsWest1) - expectedUsWest1 := "us-west-1" - - bucketUsWest1 := s.s3.Bucket("us-west-1") - resultUsWest1, err := bucketUsWest1.Location() - - c.Assert(err, check.IsNil) - c.Assert(resultUsWest1, check.Equals, expectedUsWest1) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3i_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3i_test.go deleted file mode 100644 index b0da0130..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3i_test.go +++ /dev/null @@ -1,603 +0,0 @@ -package s3_test - -import ( - "bytes" - "crypto/md5" - "fmt" - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" - "github.com/AdRoll/goamz/testutil" - "gopkg.in/check.v1" - "io/ioutil" - "net" - "net/http" - "sort" - "strings" - "time" -) - -// AmazonServer represents an Amazon S3 server. -type AmazonServer struct { - auth aws.Auth -} - -func (s *AmazonServer) SetUp(c *check.C) { - auth, err := aws.EnvAuth() - if err != nil { - c.Fatal(err.Error()) - } - s.auth = auth -} - -var _ = check.Suite(&AmazonClientSuite{Region: aws.USEast}) -var _ = check.Suite(&AmazonClientSuite{Region: aws.EUWest}) -var _ = check.Suite(&AmazonDomainClientSuite{Region: aws.USEast}) - -// AmazonClientSuite tests the client against a live S3 server. -type AmazonClientSuite struct { - aws.Region - srv AmazonServer - ClientTests -} - -func (s *AmazonClientSuite) SetUpSuite(c *check.C) { - if !testutil.Amazon { - c.Skip("live tests against AWS disabled (no -amazon)") - } - s.srv.SetUp(c) - s.s3 = s3.New(s.srv.auth, s.Region) - // In case tests were interrupted in the middle before. - s.ClientTests.Cleanup() -} - -func (s *AmazonClientSuite) TearDownTest(c *check.C) { - s.ClientTests.Cleanup() -} - -// AmazonDomainClientSuite tests the client against a live S3 -// server using bucket names in the endpoint domain name rather -// than the request path. -type AmazonDomainClientSuite struct { - aws.Region - srv AmazonServer - ClientTests -} - -func (s *AmazonDomainClientSuite) SetUpSuite(c *check.C) { - if !testutil.Amazon { - c.Skip("live tests against AWS disabled (no -amazon)") - } - s.srv.SetUp(c) - region := s.Region - region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com" - s.s3 = s3.New(s.srv.auth, region) - s.ClientTests.Cleanup() -} - -func (s *AmazonDomainClientSuite) TearDownTest(c *check.C) { - s.ClientTests.Cleanup() -} - -// ClientTests defines integration tests designed to test the client. -// It is not used as a test suite in itself, but embedded within -// another type. 
-type ClientTests struct { - s3 *s3.S3 - authIsBroken bool -} - -func (s *ClientTests) Cleanup() { - killBucket(testBucket(s.s3)) -} - -func testBucket(s *s3.S3) *s3.Bucket { - // Watch out! If this function is corrupted and made to match with something - // people own, killBucket will happily remove *everything* inside the bucket. - key := s.Auth.AccessKey - if len(key) >= 8 { - key = s.Auth.AccessKey[:8] - } - return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key)) -} - -var attempts = aws.AttemptStrategy{ - Min: 5, - Total: 20 * time.Second, - Delay: 100 * time.Millisecond, -} - -func killBucket(b *s3.Bucket) { - var err error - for attempt := attempts.Start(); attempt.Next(); { - err = b.DelBucket() - if err == nil { - return - } - if _, ok := err.(*net.DNSError); ok { - return - } - e, ok := err.(*s3.Error) - if ok && e.Code == "NoSuchBucket" { - return - } - if ok && e.Code == "BucketNotEmpty" { - // Errors are ignored here. Just retry. - resp, err := b.List("", "", "", 1000) - if err == nil { - for _, key := range resp.Contents { - _ = b.Del(key.Key) - } - } - multis, _, _ := b.ListMulti("", "") - for _, m := range multis { - _ = m.Abort() - } - } - } - message := "cannot delete test bucket" - if err != nil { - message += ": " + err.Error() - } - panic(message) -} - -func get(url string) ([]byte, error) { - for attempt := attempts.Start(); attempt.Next(); { - resp, err := http.Get(url) - if err != nil { - if attempt.HasNext() { - continue - } - return nil, err - } - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - if attempt.HasNext() { - continue - } - return nil, err - } - return data, err - } - panic("unreachable") -} - -func (s *ClientTests) TestBasicFunctionality(c *check.C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - c.Assert(err, check.IsNil) - - err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead, s3.Options{}) - c.Assert(err, check.IsNil) - defer b.Del("name") - - data, err := b.Get("name") - c.Assert(err, check.IsNil) - c.Assert(string(data), check.Equals, "yo!") - - data, err = get(b.URL("name")) - c.Assert(err, check.IsNil) - c.Assert(string(data), check.Equals, "yo!") - - buf := bytes.NewBufferString("hey!") - err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - defer b.Del("name2") - - rc, err := b.GetReader("name2") - c.Assert(err, check.IsNil) - data, err = ioutil.ReadAll(rc) - c.Check(err, check.IsNil) - c.Check(string(data), check.Equals, "hey!") - rc.Close() - - data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour))) - c.Assert(err, check.IsNil) - c.Assert(string(data), check.Equals, "hey!") - - if !s.authIsBroken { - data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour))) - c.Assert(err, check.IsNil) - c.Assert(string(data), check.Matches, "(?s).*AccessDenied.*") - } - - err = b.DelBucket() - c.Assert(err, check.NotNil) - - s3err, ok := err.(*s3.Error) - c.Assert(ok, check.Equals, true) - c.Assert(s3err.Code, check.Equals, "BucketNotEmpty") - c.Assert(s3err.BucketName, check.Equals, b.Name) - c.Assert(s3err.Message, check.Equals, "The bucket you tried to delete is not empty") - - err = b.Del("name") - c.Assert(err, check.IsNil) - err = b.Del("name2") - c.Assert(err, check.IsNil) - - err = b.DelBucket() - c.Assert(err, check.IsNil) -} - -func (s *ClientTests) TestGetNotFound(c *check.C) { - b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey) - data, err := b.Get("non-existent") - - s3err, _ := err.(*s3.Error) - 
c.Assert(s3err, check.NotNil) - c.Assert(s3err.StatusCode, check.Equals, 404) - c.Assert(s3err.Code, check.Equals, "NoSuchBucket") - c.Assert(s3err.Message, check.Equals, "The specified bucket does not exist") - c.Assert(data, check.IsNil) -} - -// Communicate with all endpoints to see if they are alive. -func (s *ClientTests) TestRegions(c *check.C) { - errs := make(chan error, len(aws.Regions)) - for _, region := range aws.Regions { - go func(r aws.Region) { - s := s3.New(s.s3.Auth, r) - b := s.Bucket("goamz-" + s.Auth.AccessKey) - _, err := b.Get("non-existent") - errs <- err - }(region) - } - for _ = range aws.Regions { - err := <-errs - if err != nil { - s3_err, ok := err.(*s3.Error) - if ok { - c.Check(s3_err.Code, check.Matches, "NoSuchBucket") - } else if _, ok = err.(*net.DNSError); ok { - // Okay as well. - } else { - c.Errorf("Non-S3 error: %s", err) - } - } else { - c.Errorf("Test should have errored but it seems to have succeeded") - } - } -} - -var objectNames = []string{ - "index.html", - "index2.html", - "photos/2006/February/sample2.jpg", - "photos/2006/February/sample3.jpg", - "photos/2006/February/sample4.jpg", - "photos/2006/January/sample.jpg", - "test/bar", - "test/foo", -} - -func keys(names ...string) []s3.Key { - ks := make([]s3.Key, len(names)) - for i, name := range names { - ks[i].Key = name - } - return ks -} - -// As the ListResp specifies all the parameters to the -// request too, we use it to specify request parameters -// and expected results. The Contents field is -// used only for the key names inside it. -var listTests = []s3.ListResp{ - // normal list. - { - Contents: keys(objectNames...), - }, { - Marker: objectNames[0], - Contents: keys(objectNames[1:]...), - }, { - Marker: objectNames[0] + "a", - Contents: keys(objectNames[1:]...), - }, { - Marker: "z", - }, - - // limited results. 
- { - MaxKeys: 2, - Contents: keys(objectNames[0:2]...), - IsTruncated: true, - }, { - MaxKeys: 2, - Marker: objectNames[0], - Contents: keys(objectNames[1:3]...), - IsTruncated: true, - }, { - MaxKeys: 2, - Marker: objectNames[len(objectNames)-2], - Contents: keys(objectNames[len(objectNames)-1:]...), - }, - - // with delimiter - { - Delimiter: "/", - CommonPrefixes: []string{"photos/", "test/"}, - Contents: keys("index.html", "index2.html"), - }, { - Delimiter: "/", - Prefix: "photos/2006/", - CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"}, - }, { - Delimiter: "/", - Prefix: "t", - CommonPrefixes: []string{"test/"}, - }, { - Delimiter: "/", - MaxKeys: 1, - Contents: keys("index.html"), - IsTruncated: true, - }, { - Delimiter: "/", - MaxKeys: 1, - Marker: "index2.html", - CommonPrefixes: []string{"photos/"}, - IsTruncated: true, - }, { - Delimiter: "/", - MaxKeys: 1, - Marker: "photos/", - CommonPrefixes: []string{"test/"}, - IsTruncated: false, - }, { - Delimiter: "Feb", - CommonPrefixes: []string{"photos/2006/Feb"}, - Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"), - }, -} - -func (s *ClientTests) TestDoublePutBucket(c *check.C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.PublicRead) - c.Assert(err, check.IsNil) - - err = b.PutBucket(s3.PublicRead) - if err != nil { - c.Assert(err, check.FitsTypeOf, new(s3.Error)) - c.Assert(err.(*s3.Error).Code, check.Equals, "BucketAlreadyOwnedByYou") - } -} - -func (s *ClientTests) TestBucketList(c *check.C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, check.IsNil) - - objData := make(map[string][]byte) - for i, path := range objectNames { - data := []byte(strings.Repeat("a", i)) - err := b.Put(path, data, "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - defer b.Del(path) - objData[path] = data - } - - for i, t := range listTests { - c.Logf("test %d", i) - resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys) - c.Assert(err, check.IsNil) - c.Check(resp.Name, check.Equals, b.Name) - c.Check(resp.Delimiter, check.Equals, t.Delimiter) - c.Check(resp.IsTruncated, check.Equals, t.IsTruncated) - c.Check(resp.CommonPrefixes, check.DeepEquals, t.CommonPrefixes) - checkContents(c, resp.Contents, objData, t.Contents) - } -} - -func etag(data []byte) string { - sum := md5.New() - sum.Write(data) - return fmt.Sprintf(`"%x"`, sum.Sum(nil)) -} - -func checkContents(c *check.C, contents []s3.Key, data map[string][]byte, expected []s3.Key) { - c.Assert(contents, check.HasLen, len(expected)) - for i, k := range contents { - c.Check(k.Key, check.Equals, expected[i].Key) - // TODO mtime - c.Check(k.Size, check.Equals, int64(len(data[k.Key]))) - c.Check(k.ETag, check.Equals, etag(data[k.Key])) - } -} - -func (s *ClientTests) TestMultiInitPutList(c *check.C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, check.IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - c.Assert(multi.UploadId, check.Matches, ".+") - defer multi.Abort() - - var sent []s3.Part - - for i := 0; i < 5; i++ { - p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1))) - c.Assert(err, check.IsNil) - c.Assert(p.N, check.Equals, i+1) - c.Assert(p.Size, check.Equals, int64(8)) - c.Assert(p.ETag, check.Matches, ".+") - sent = append(sent, p) - } - - s3.SetListPartsMax(2) - - parts, err := multi.ListParts() - c.Assert(err, check.IsNil) - c.Assert(parts,
check.HasLen, len(sent)) - for i := range parts { - c.Assert(parts[i].N, check.Equals, sent[i].N) - c.Assert(parts[i].Size, check.Equals, sent[i].Size) - c.Assert(parts[i].ETag, check.Equals, sent[i].ETag) - } - - err = multi.Complete(parts) - s3err, failed := err.(*s3.Error) - c.Assert(failed, check.Equals, true) - c.Assert(s3err.Code, check.Equals, "EntityTooSmall") - - err = multi.Abort() - c.Assert(err, check.IsNil) - _, err = multi.ListParts() - s3err, ok := err.(*s3.Error) - c.Assert(ok, check.Equals, true) - c.Assert(s3err.Code, check.Equals, "NoSuchUpload") -} - -// This may take a minute or more due to the minimum size accepted by S3 -// on multipart upload parts. -func (s *ClientTests) TestMultiComplete(c *check.C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, check.IsNil) - - contentType := "text/plain" - meta := make(map[string][]string) - meta["X-Amz-Meta-TestField"] = []string{"testValue"} - options := s3.Options{ContentEncoding: "identity", ContentDisposition: "inline", Meta: meta} - multi, err := b.InitMulti("multi", contentType, s3.Private, options) - c.Assert(err, check.IsNil) - c.Assert(multi.UploadId, check.Matches, ".+") - defer multi.Abort() - - // Minimum size S3 accepts for all but the last part is 5MB. - data1 := make([]byte, 5*1024*1024) - data2 := []byte("<part 2>") - - part1, err := multi.PutPart(1, bytes.NewReader(data1)) - c.Assert(err, check.IsNil) - part2, err := multi.PutPart(2, bytes.NewReader(data2)) - c.Assert(err, check.IsNil) - - // Purposefully reversed. The order requirement must be handled. - err = multi.Complete([]s3.Part{part2, part1}) - c.Assert(err, check.IsNil) - - data, err := b.Get("multi") - c.Assert(err, check.IsNil) - - c.Assert(len(data), check.Equals, len(data1)+len(data2)) - for i := range data1 { - if data[i] != data1[i] { - c.Fatalf("uploaded object at byte %d: want %d, got %d", i, data1[i], data[i]) - } - } - c.Assert(string(data[len(data1):]), check.Equals, string(data2)) - - resp, err := b.GetResponse("multi") - c.Assert(resp.Header.Get("Content-Type"), check.Equals, contentType) - c.Assert(resp.Header.Get("x-amz-acl"), check.Equals, s3.Private) - c.Assert(resp.Header.Get("Content-MD5"), check.Equals, options.ContentMD5) - c.Assert(resp.Header.Get("Content-Encoding"), check.Equals, options.ContentEncoding) - c.Assert(resp.Header.Get("Content-Disposition"), check.Equals, options.ContentDisposition) - for k, values := range meta { - c.Assert(resp.Header.Get(k), check.Equals, strings.Join(values, ",")) - } -} - -type multiList []*s3.Multi - -func (l multiList) Len() int { return len(l) } -func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key } -func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } - -func (s *ClientTests) TestListMulti(c *check.C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, check.IsNil) - - // Ensure an empty state before testing its behavior. - multis, _, err := b.ListMulti("", "") - for _, m := range multis { - err := m.Abort() - c.Assert(err, check.IsNil) - } - - keys := []string{ - "a/multi2", - "a/multi3", - "b/multi4", - "multi1", - } - for _, key := range keys { - m, err := b.InitMulti(key, "", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - defer m.Abort() - } - - // Amazon's implementation of the multiple-request listing for - // multipart uploads in progress seems broken in multiple ways. - // (next tokens are not provided, etc).
- //s3.SetListMultiMax(2) - - multis, prefixes, err := b.ListMulti("", "") - c.Assert(err, check.IsNil) - for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); { - multis, prefixes, err = b.ListMulti("", "") - c.Assert(err, check.IsNil) - } - sort.Sort(multiList(multis)) - c.Assert(prefixes, check.IsNil) - var gotKeys []string - for _, m := range multis { - gotKeys = append(gotKeys, m.Key) - } - c.Assert(gotKeys, check.DeepEquals, keys) - for _, m := range multis { - c.Assert(m.Bucket, check.Equals, b) - c.Assert(m.UploadId, check.Matches, ".+") - } - - multis, prefixes, err = b.ListMulti("", "/") - for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; { - multis, prefixes, err = b.ListMulti("", "") - c.Assert(err, check.IsNil) - } - c.Assert(err, check.IsNil) - c.Assert(prefixes, check.DeepEquals, []string{"a/", "b/"}) - c.Assert(multis, check.HasLen, 1) - c.Assert(multis[0].Bucket, check.Equals, b) - c.Assert(multis[0].Key, check.Equals, "multi1") - c.Assert(multis[0].UploadId, check.Matches, ".+") - - for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; { - multis, prefixes, err = b.ListMulti("", "") - c.Assert(err, check.IsNil) - } - multis, prefixes, err = b.ListMulti("a/", "/") - c.Assert(err, check.IsNil) - c.Assert(prefixes, check.IsNil) - c.Assert(multis, check.HasLen, 2) - c.Assert(multis[0].Bucket, check.Equals, b) - c.Assert(multis[0].Key, check.Equals, "a/multi2") - c.Assert(multis[0].UploadId, check.Matches, ".+") - c.Assert(multis[1].Bucket, check.Equals, b) - c.Assert(multis[1].Key, check.Equals, "a/multi3") - c.Assert(multis[1].UploadId, check.Matches, ".+") -} - -func (s *ClientTests) TestMultiPutAllZeroLength(c *check.C) { - b := testBucket(s.s3) - err := b.PutBucket(s3.Private) - c.Assert(err, check.IsNil) - - multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{}) - c.Assert(err, check.IsNil) - defer multi.Abort() - - // This tests an edge case. Amazon requires at least one - // part for multipart uploads to work, even if the part is empty. - parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024) - c.Assert(err, check.IsNil) - c.Assert(parts, check.HasLen, 1) - c.Assert(parts[0].Size, check.Equals, int64(0)) - c.Assert(parts[0].ETag, check.Equals, `"d41d8cd98f00b204e9800998ecf8427e"`) - - err = multi.Complete(parts) - c.Assert(err, check.IsNil) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3t_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3t_test.go deleted file mode 100644 index 72279ff3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3t_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package s3_test - -import ( - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" - "github.com/AdRoll/goamz/s3/s3test" - "github.com/AdRoll/goamz/testutil" - "gopkg.in/check.v1" -) - -type LocalServer struct { - auth aws.Auth - region aws.Region - srv *s3test.Server - config *s3test.Config -} - -func (s *LocalServer) SetUp(c *check.C) { - srv, err := s3test.NewServer(s.config) - c.Assert(err, check.IsNil) - c.Assert(srv, check.NotNil) - - s.srv = srv - s.region = aws.Region{ - Name: "faux-region-1", - S3Endpoint: srv.URL(), - S3LocationConstraint: true, // s3test server requires a LocationConstraint - } -} - -// LocalServerSuite defines tests that will run -// against the local s3test server.
It includes -// selected tests from ClientTests; -// when the s3test functionality is sufficient, it should -// include all of them, and ClientTests can be simply embedded. -type LocalServerSuite struct { - srv LocalServer - clientTests ClientTests -} - -var ( - // run tests twice, once in us-east-1 mode, once not. - _ = check.Suite(&LocalServerSuite{}) - _ = check.Suite(&LocalServerSuite{ - srv: LocalServer{ - config: &s3test.Config{ - Send409Conflict: true, - }, - }, - }) -) - -func (s *LocalServerSuite) SetUpSuite(c *check.C) { - s.srv.SetUp(c) - s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region) - - // TODO Sadly the fake server ignores auth completely right now. :-( - s.clientTests.authIsBroken = true - s.clientTests.Cleanup() -} - -func (s *LocalServerSuite) TearDownTest(c *check.C) { - s.clientTests.Cleanup() -} - -func (s *LocalServerSuite) TestBasicFunctionality(c *check.C) { - s.clientTests.TestBasicFunctionality(c) } - -func (s *LocalServerSuite) TestGetNotFound(c *check.C) { - s.clientTests.TestGetNotFound(c) -} - -func (s *LocalServerSuite) TestBucketList(c *check.C) { - s.clientTests.TestBucketList(c) -} - -func (s *LocalServerSuite) TestDoublePutBucket(c *check.C) { - s.clientTests.TestDoublePutBucket(c) -} - -func (s *LocalServerSuite) TestMultiComplete(c *check.C) { - if !testutil.Amazon { - c.Skip("live tests against AWS disabled (no -amazon)") - } - s.clientTests.TestMultiComplete(c) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3test/server.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3test/server.go deleted file mode 100644 index 0dd63af3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/s3test/server.go +++ /dev/null @@ -1,928 +0,0 @@ -package s3test - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "fmt" - "github.com/AdRoll/goamz/s3" - "io" - "io/ioutil" - "log" - "math/rand" - "net" - "net/http" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -const debug = false - -var rangePattern = regexp.MustCompile(`^bytes=([\d]*)-([\d]*)$`) - -type s3Error struct { - statusCode int - XMLName struct{} `xml:"Error"` - Code string - Message string - BucketName string - RequestId string - HostId string -} - -type action struct { - srv *Server - w http.ResponseWriter - req *http.Request - reqId string -} - -// Config controls the internal behaviour of the Server. A nil config is the default -// and behaves as if all configurations assume their default behaviour. Once passed -// to NewServer, the configuration must not be modified. -type Config struct { - // Send409Conflict controls how the Server will respond to calls to PUT on a - // previously existing bucket. The default is false, and corresponds to the - // us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of - // all other regions. - // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html - Send409Conflict bool - - // Address on which to listen. By default, a random port is assigned by the - // operating system and the server listens on localhost. - ListenAddress string -} - -func (c *Config) send409Conflict() bool { - if c != nil { - return c.Send409Conflict - } - return false -} - -// Server is a fake S3 server for testing purposes. -// All of the data for the server is kept in memory.
-type Server struct { - url string - reqId int - listener net.Listener - mu sync.Mutex - buckets map[string]*bucket - config *Config -} - -type bucket struct { - name string - acl s3.ACL - ctime time.Time - objects map[string]*object - multipartUploads map[string][]*multipartUploadPart - multipartMeta map[string]http.Header -} - -type object struct { - name string - mtime time.Time - meta http.Header // metadata to return with requests. - checksum []byte // also held as Content-MD5 in meta. - data []byte -} - -type multipartUploadPart struct { - index uint - data []byte - etag string - lastModified time.Time -} - -type multipartUploadPartByIndex []*multipartUploadPart - -func (x multipartUploadPartByIndex) Len() int { - return len(x) -} - -func (x multipartUploadPartByIndex) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} - -func (x multipartUploadPartByIndex) Less(i, j int) bool { - return x[i].index < x[j].index -} - -// A resource encapsulates the subject of an HTTP request. -// The resource referred to may or may not exist -// when the request is made. -type resource interface { - put(a *action) interface{} - get(a *action) interface{} - post(a *action) interface{} - delete(a *action) interface{} -} - -func NewServer(config *Config) (*Server, error) { - listenAddress := "localhost:0" - - if config != nil && config.ListenAddress != "" { - listenAddress = config.ListenAddress - } - - l, err := net.Listen("tcp", listenAddress) - if err != nil { - return nil, fmt.Errorf("cannot listen on localhost: %v", err) - } - srv := &Server{ - listener: l, - url: "http://" + l.Addr().String(), - buckets: make(map[string]*bucket), - config: config, - } - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - srv.serveHTTP(w, req) - })) - return srv, nil -} - -// Quit closes down the server. -func (srv *Server) Quit() { - srv.listener.Close() -} - -// URL returns a URL for the server. -func (srv *Server) URL() string { - return srv.url -} - -func fatalf(code int, codeStr string, errf string, a ...interface{}) { - panic(&s3Error{ - statusCode: code, - Code: codeStr, - Message: fmt.Sprintf(errf, a...), - }) -} - -// serveHTTP serves the S3 protocol. -func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) { - // ignore error from ParseForm as it's usually spurious. - req.ParseForm() - - srv.mu.Lock() - defer srv.mu.Unlock() - - if debug { - log.Printf("s3test %q %q", req.Method, req.URL) - } - a := &action{ - srv: srv, - w: w, - req: req, - reqId: fmt.Sprintf("%09X", srv.reqId), - } - srv.reqId++ - - var r resource - defer func() { - switch err := recover().(type) { - case *s3Error: - switch r := r.(type) { - case objectResource: - err.BucketName = r.bucket.name - case bucketResource: - err.BucketName = r.name - } - err.RequestId = a.reqId - // TODO HostId - w.Header().Set("Content-Type", "application/xml") - w.WriteHeader(err.statusCode) - xmlMarshal(w, err) - case nil: - default: - panic(err) - } - }() - - r = srv.resourceForURL(req.URL) - - var resp interface{} - switch req.Method { - case "PUT": - resp = r.put(a) - case "GET", "HEAD": - resp = r.get(a) - case "DELETE": - resp = r.delete(a) - case "POST": - resp = r.post(a) - default: - fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) - } - if resp != nil && req.Method != "HEAD" { - xmlMarshal(w, resp) - } -} - -// xmlMarshal is the same as xml.Marshal except that -// it panics on error. The marshalling should not fail, -// but we want to know if it does.
-func xmlMarshal(w io.Writer, x interface{}) { - if err := xml.NewEncoder(w).Encode(x); err != nil { - panic(fmt.Errorf("error marshalling %#v: %v", x, err)) - } -} - -// In a fully implemented test server, each of these would have -// its own resource type. -var unimplementedBucketResourceNames = map[string]bool{ - "acl": true, - "lifecycle": true, - "policy": true, - "location": true, - "logging": true, - "notification": true, - "versions": true, - "requestPayment": true, - "versioning": true, - "website": true, - "uploads": true, -} - -var unimplementedObjectResourceNames = map[string]bool{ - "acl": true, - "torrent": true, -} - -var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?") - -// resourceForURL returns a resource object for the given URL. -func (srv *Server) resourceForURL(u *url.URL) (r resource) { - m := pathRegexp.FindStringSubmatch(u.Path) - if m == nil { - fatalf(404, "InvalidURI", "Couldn't parse the specified URI") - } - bucketName := m[2] - objectName := m[4] - if bucketName == "" { - return nullResource{} // root - } - b := bucketResource{ - name: bucketName, - bucket: srv.buckets[bucketName], - } - q := u.Query() - if objectName == "" { - for name := range q { - if unimplementedBucketResourceNames[name] { - return nullResource{} - } - } - return b - - } - if b.bucket == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - objr := objectResource{ - name: objectName, - version: q.Get("versionId"), - bucket: b.bucket, - } - for name := range q { - if unimplementedObjectResourceNames[name] { - return nullResource{} - } - } - if obj := objr.bucket.objects[objr.name]; obj != nil { - objr.object = obj - } - return objr -} - -// nullResource has error stubs for all resource methods. -type nullResource struct{} - -func notAllowed() interface{} { - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -func (nullResource) put(a *action) interface{} { return notAllowed() } -func (nullResource) get(a *action) interface{} { return notAllowed() } -func (nullResource) post(a *action) interface{} { return notAllowed() } -func (nullResource) delete(a *action) interface{} { return notAllowed() } - -const timeFormat = "2006-01-02T15:04:05.000Z07:00" - -type bucketResource struct { - name string - bucket *bucket // non-nil if the bucket already exists. -} - -// GET on a bucket lists the objects in the bucket. -// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html -func (r bucketResource) get(a *action) interface{} { - if r.bucket == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - delimiter := a.req.Form.Get("delimiter") - marker := a.req.Form.Get("marker") - maxKeys := -1 - if s := a.req.Form.Get("max-keys"); s != "" { - i, err := strconv.Atoi(s) - if err != nil || i < 0 { - fatalf(400, "InvalidArgument", "invalid value for max-keys: %q", s) - } - maxKeys = i - } - prefix := a.req.Form.Get("prefix") - a.w.Header().Set("Content-Type", "application/xml") - - if a.req.Method == "HEAD" { - return nil - } - - var objs orderedObjects - - // first get all matching objects and arrange them in alphabetical order.
- for name, obj := range r.bucket.objects { - if strings.HasPrefix(name, prefix) { - objs = append(objs, obj) - } - } - sort.Sort(objs) - - if maxKeys <= 0 { - maxKeys = 1000 - } - resp := &s3.ListResp{ - Name: r.bucket.name, - Prefix: prefix, - Delimiter: delimiter, - Marker: marker, - MaxKeys: maxKeys, - } - - var prefixes []string - for _, obj := range objs { - if !strings.HasPrefix(obj.name, prefix) { - continue - } - name := obj.name - isPrefix := false - if delimiter != "" { - if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { - name = obj.name[:len(prefix)+i+len(delimiter)] - if prefixes != nil && prefixes[len(prefixes)-1] == name { - continue - } - isPrefix = true - } - } - if name <= marker { - continue - } - if len(resp.Contents)+len(prefixes) >= maxKeys { - resp.IsTruncated = true - break - } - if isPrefix { - prefixes = append(prefixes, name) - } else { - // Contents contains only keys not found in CommonPrefixes - resp.Contents = append(resp.Contents, obj.s3Key()) - } - } - resp.CommonPrefixes = prefixes - return resp -} - -// orderedObjects holds a slice of objects that can be sorted -// by name. -type orderedObjects []*object - -func (s orderedObjects) Len() int { - return len(s) -} -func (s orderedObjects) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s orderedObjects) Less(i, j int) bool { - return s[i].name < s[j].name -} - -func (obj *object) s3Key() s3.Key { - return s3.Key{ - Key: obj.name, - LastModified: obj.mtime.Format(timeFormat), - Size: int64(len(obj.data)), - ETag: fmt.Sprintf(`"%x"`, obj.checksum), - // TODO StorageClass - // TODO Owner - } -} - -// DELETE on a bucket deletes the bucket if it's not empty. -func (r bucketResource) delete(a *action) interface{} { - b := r.bucket - if b == nil { - fatalf(404, "NoSuchBucket", "The specified bucket does not exist") - } - if len(b.objects) > 0 { - fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty") - } - delete(a.srv.buckets, b.name) - return nil -} - -// PUT on a bucket creates the bucket. 
-// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html -func (r bucketResource) put(a *action) interface{} { - var created bool - if r.bucket == nil { - if !validBucketName(r.name) { - fatalf(400, "InvalidBucketName", "The specified bucket is not valid") - } - if loc := locationConstraint(a); loc == "" { - fatalf(400, "InvalidRequest", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.") - } - // TODO validate acl - r.bucket = &bucket{ - name: r.name, - // TODO default acl - objects: make(map[string]*object), - multipartUploads: make(map[string][]*multipartUploadPart), - multipartMeta: make(map[string]http.Header), - } - a.srv.buckets[r.name] = r.bucket - created = true - } - if !created && a.srv.config.send409Conflict() { - fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.") - } - r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl")) - return nil -} - -func (r bucketResource) post(a *action) interface{} { - if _, multiDel := a.req.URL.Query()["delete"]; multiDel { - return r.multiDel(a) - } - - fatalf(400, "Method", "bucket operation not supported") - return nil -} - -func (b bucketResource) multiDel(a *action) interface{} { - type multiDelRequestObject struct { - Key string - VersionId string - } - - type multiDelRequest struct { - Quiet bool - Object []*multiDelRequestObject - } - - type multiDelDelete struct { - XMLName struct{} `xml:"Deleted"` - Key string - } - - type multiDelError struct { - XMLName struct{} `xml:"Error"` - Key string - Code string - Message string - } - - type multiDelResult struct { - XMLName struct{} `xml:"DeleteResult"` - Deleted []*multiDelDelete - Error []*multiDelError - } - - req := &multiDelRequest{} - - if err := xml.NewDecoder(a.req.Body).Decode(req); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - - res := &multiDelResult{ - Deleted: []*multiDelDelete{}, - Error: []*multiDelError{}, - } - - for _, o := range req.Object { - if _, exists := b.bucket.objects[o.Key]; exists { - delete(b.bucket.objects, o.Key) - - res.Deleted = append(res.Deleted, &multiDelDelete{ - Key: o.Key, - }) - } else { - res.Error = append(res.Error, &multiDelError{ - Key: o.Key, - Code: "AccessDenied", - Message: "Access Denied", - }) - } - } - - return res -} - -// validBucketName returns whether name is a valid bucket name. -// Here are the rules, from: -// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html -// -// Can contain lowercase letters, numbers, periods (.), underscores (_), -// and dashes (-). You can use uppercase letters for buckets only in the -// US Standard region. -// -// Must start with a number or letter -// -// Must be between 3 and 255 characters long -// -// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4)) -// but the real S3 server does not seem to check that rule, so we will not -// check it either.
-// -func validBucketName(name string) bool { - if len(name) < 3 || len(name) > 255 { - return false - } - r := name[0] - if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') { - return false - } - for _, r := range name { - switch { - case r >= '0' && r <= '9': - case r >= 'a' && r <= 'z': - case r == '_' || r == '-': - case r == '.': - default: - return false - } - } - return true -} - -var responseParams = map[string]bool{ - "content-type": true, - "content-language": true, - "expires": true, - "cache-control": true, - "content-disposition": true, - "content-encoding": true, -} - -type objectResource struct { - name string - version string - bucket *bucket // always non-nil. - object *object // may be nil. -} - -// GET on an object gets the contents of the object. -// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html -func (objr objectResource) get(a *action) interface{} { - obj := objr.object - if obj == nil { - fatalf(404, "NoSuchKey", "The specified key does not exist.") - } - h := a.w.Header() - // add metadata - for name, d := range obj.meta { - h[name] = d - } - // override header values in response to request parameters. - for name, vals := range a.req.Form { - if strings.HasPrefix(name, "response-") { - name = name[len("response-"):] - if !responseParams[name] { - continue - } - h.Set(name, vals[0]) - } - } - - data := obj.data - status := http.StatusOK - if r := a.req.Header.Get("Range"); r != "" { - // s3 ignores invalid ranges - if matches := rangePattern.FindStringSubmatch(r); len(matches) == 3 { - var err error - start := 0 - end := len(obj.data) - 1 - if matches[1] != "" { - start, err = strconv.Atoi(matches[1]) - } - if err == nil && matches[2] != "" { - end, err = strconv.Atoi(matches[2]) - } - if err == nil && start >= 0 && end >= start { - if start >= len(obj.data) { - fatalf(416, "InvalidRequest", "The requested range is not satisfiable") - } - if end > len(obj.data)-1 { - end = len(obj.data) - 1 - } - data = obj.data[start : end+1] - status = http.StatusPartialContent - h.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, len(obj.data))) - } - } - } - // TODO Last-Modified-Since - // TODO If-Modified-Since - // TODO If-Unmodified-Since - // TODO If-Match - // TODO If-None-Match - // TODO Connection: close ?? - // TODO x-amz-request-id - h.Set("Content-Length", fmt.Sprint(len(data))) - h.Set("ETag", hex.EncodeToString(obj.checksum)) - h.Set("Last-Modified", obj.mtime.Format(time.RFC1123)) - - if status != http.StatusOK { - a.w.WriteHeader(status) - } - - if a.req.Method == "HEAD" { - return nil - } - // TODO avoid holding the lock when writing data. - _, err := a.w.Write(data) - if err != nil { - // we can't do much except just log the fact. - log.Printf("error writing data: %v", err) - } - return nil -} - -var metaHeaders = map[string]bool{ - "Content-MD5": true, - "x-amz-acl": true, - "Content-Type": true, - "Content-Encoding": true, - "Content-Disposition": true, -} - -// PUT on an object creates the object. -func (objr objectResource) put(a *action) interface{} { - // TODO Cache-Control header - // TODO Expires header - // TODO x-amz-server-side-encryption - // TODO x-amz-storage-class - - uploadId := a.req.URL.Query().Get("uploadId") - var partNumber uint - - // Check that the upload ID is valid if this is a multipart upload - if uploadId != "" { - if _, ok := objr.bucket.multipartUploads[uploadId]; !ok { - fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. 
The upload ID might be invalid, or the multipart upload might have been aborted or completed.") - } - - partNumberStr := a.req.URL.Query().Get("partNumber") - - if partNumberStr == "" { - fatalf(400, "InvalidRequest", "Missing partNumber parameter") - } - - number, err := strconv.ParseUint(partNumberStr, 10, 32) - - if err != nil { - fatalf(400, "InvalidRequest", "partNumber is not a number") - } - - partNumber = uint(number) - } - - var expectHash []byte - if c := a.req.Header.Get("Content-MD5"); c != "" { - var err error - expectHash, err = base64.StdEncoding.DecodeString(c) - if err != nil || len(expectHash) != md5.Size { - fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid") - } - } - sum := md5.New() - // TODO avoid holding lock while reading data. - data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) - if err != nil { - fatalf(400, "TODO", "read error") - } - gotHash := sum.Sum(nil) - if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { - fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received") - } - if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { - fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") - } - - etag := fmt.Sprintf("\"%x\"", gotHash) - - a.w.Header().Add("ETag", etag) - - if uploadId == "" { - // For traditional uploads - - // TODO is this correct, or should we erase all previous metadata? - obj := objr.object - if obj == nil { - obj = &object{ - name: objr.name, - meta: make(http.Header), - } - } - - // PUT request has been successful - save data and metadata - for key, values := range a.req.Header { - key = http.CanonicalHeaderKey(key) - if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { - obj.meta[key] = values - } - } - obj.data = data - obj.checksum = gotHash - obj.mtime = time.Now() - objr.bucket.objects[objr.name] = obj - } else { - // For multipart commit - - parts := objr.bucket.multipartUploads[uploadId] - part := &multipartUploadPart{ - index: partNumber, - data: data, - etag: etag, - lastModified: time.Now(), - } - - objr.bucket.multipartUploads[uploadId] = append(parts, part) - } - - return nil -} - -func (objr objectResource) delete(a *action) interface{} { - uploadId := a.req.URL.Query().Get("uploadId") - - if uploadId == "" { - // Traditional object delete - delete(objr.bucket.objects, objr.name) - } else { - // Multipart commit abort - _, ok := objr.bucket.multipartUploads[uploadId] - - if !ok { - fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. 
The upload ID might be invalid, or the multipart upload might have been aborted or completed.") - } - - delete(objr.bucket.multipartUploads, uploadId) - } - return nil -} - -func (objr objectResource) post(a *action) interface{} { - // Check if we're initializing a multipart upload - if _, ok := a.req.URL.Query()["uploads"]; ok { - type multipartInitResponse struct { - XMLName struct{} `xml:"InitiateMultipartUploadResult"` - Bucket string - Key string - UploadId string - } - - uploadId := strconv.FormatInt(rand.Int63(), 16) - - objr.bucket.multipartUploads[uploadId] = []*multipartUploadPart{} - objr.bucket.multipartMeta[uploadId] = make(http.Header) - for key, values := range a.req.Header { - key = http.CanonicalHeaderKey(key) - if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { - objr.bucket.multipartMeta[uploadId][key] = values - } - } - - return &multipartInitResponse{ - Bucket: objr.bucket.name, - Key: objr.name, - UploadId: uploadId, - } - } - - // Check if we're completing a multipart upload - if uploadId := a.req.URL.Query().Get("uploadId"); uploadId != "" { - type multipartCompleteRequestPart struct { - XMLName struct{} `xml:"Part"` - PartNumber uint - ETag string - } - - type multipartCompleteRequest struct { - XMLName struct{} `xml:"CompleteMultipartUpload"` - Part []multipartCompleteRequestPart - } - - type multipartCompleteResponse struct { - XMLName struct{} `xml:"CompleteMultipartUploadResult"` - Location string - Bucket string - Key string - ETag string - } - - parts, ok := objr.bucket.multipartUploads[uploadId] - - if !ok { - fatalf(404, "NoSuchUpload", "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.") - } - - req := &multipartCompleteRequest{} - - if err := xml.NewDecoder(a.req.Body).Decode(req); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - - if len(req.Part) != len(parts) { - fatalf(400, "InvalidRequest", fmt.Sprintf("Number of parts does not match: expected %d, received %d", len(parts), len(req.Part))) - } - - sum := md5.New() - data := &bytes.Buffer{} - w := io.MultiWriter(sum, data) - - sort.Sort(multipartUploadPartByIndex(parts)) - - for i, p := range parts { - reqPart := req.Part[i] - - if reqPart.PartNumber != p.index { - fatalf(400, "InvalidRequest", "Bad part number") - } - - if reqPart.ETag != p.etag { - fatalf(400, "InvalidRequest", fmt.Sprintf("Invalid etag for part %d", reqPart.PartNumber)) - } - - w.Write(p.data) - } - - delete(objr.bucket.multipartUploads, uploadId) - - obj := objr.object - - if obj == nil { - obj = &object{ - name: objr.name, - meta: make(http.Header), - } - } - - obj.data = data.Bytes() - obj.checksum = sum.Sum(nil) - obj.mtime = time.Now() - objr.bucket.objects[objr.name] = obj - obj.meta = objr.bucket.multipartMeta[uploadId] - - objectLocation := fmt.Sprintf("http://%s/%s/%s", a.srv.listener.Addr().String(), objr.bucket.name, objr.name) - - return &multipartCompleteResponse{ - Location: objectLocation, - Bucket: objr.bucket.name, - Key: objr.name, - ETag: uploadId, - } - } - - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -type CreateBucketConfiguration struct { - LocationConstraint string -} - -// locationConstraint parses the request body (if present). -// If there is no body, an empty string will be returned. 
-func locationConstraint(a *action) string { - var body bytes.Buffer - if _, err := io.Copy(&body, a.req.Body); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - if body.Len() == 0 { - return "" - } - var loc CreateBucketConfiguration - if err := xml.NewDecoder(&body).Decode(&loc); err != nil { - fatalf(400, "InvalidRequest", err.Error()) - } - return loc.LocationConstraint -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign.go deleted file mode 100644 index 19642094..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign.go +++ /dev/null @@ -1,120 +0,0 @@ -package s3 - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "github.com/AdRoll/goamz/aws" - "log" - "sort" - "strings" -) - -var b64 = base64.StdEncoding - -// ---------------------------------------------------------------------------- -// S3 signing (http://goo.gl/G1LrK) - -var s3ParamsToSign = map[string]bool{ - "acl": true, - "location": true, - "logging": true, - "notification": true, - "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, - "website": true, - "delete": true, -} - -func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) { - var md5, ctype, date, xamz string - var xamzDate bool - var keys, sarray []string - xheaders := make(map[string]string) - for k, v := range headers { - k = strings.ToLower(k) - switch k { - case "content-md5": - md5 = v[0] - case "content-type": - ctype = v[0] - case "date": - if !xamzDate { - date = v[0] - } - default: - if strings.HasPrefix(k, "x-amz-") { - keys = append(keys, k) - xheaders[k] = strings.Join(v, ",") - if k == "x-amz-date" { - xamzDate = true - date = "" - } - } - } - } - if len(keys) > 0 { - sort.StringSlice(keys).Sort() - for i := range keys { - key := keys[i] - value := xheaders[key] - sarray = append(sarray, key+":"+value) - } - xamz = strings.Join(sarray, "\n") + "\n" - } - - expires := false - if v, ok := params["Expires"]; ok { - // Query string request authentication alternative. - expires = true - date = v[0] - params["AWSAccessKeyId"] = []string{auth.AccessKey} - } - - sarray = sarray[0:0] - for k, v := range params { - if s3ParamsToSign[k] { - for _, vi := range v { - if vi == "" { - sarray = append(sarray, k) - } else { - // "When signing you do not encode these values." - sarray = append(sarray, k+"="+vi) - } - } - } - } - if len(sarray) > 0 { - sort.StringSlice(sarray).Sort() - canonicalPath = canonicalPath + "?" 
+ strings.Join(sarray, "&") - } - - payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath - hash := hmac.New(sha1.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - if expires { - params["Signature"] = []string{string(signature)} - } else { - headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} - } - if debug { - log.Printf("Signature payload: %q", payload) - log.Printf("Signature: %q", signature) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign_test.go deleted file mode 100644 index 613dc766..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/AdRoll/goamz/s3/sign_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package s3_test - -import ( - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" - "gopkg.in/check.v1" -) - -// S3 ReST authentication docs: http://goo.gl/G1LrK - -var testAuth = aws.Auth{AccessKey: "0PN5J17HBGZHT7JJ3X82", SecretKey: "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"} - -func (s *S) TestSignExampleObjectGet(c *check.C) { - method := "GET" - path := "/johnsmith/photos/puppy.jpg" - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:36:42 +0000"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleObjectPut(c *check.C) { - method := "PUT" - path := "/johnsmith/photos/puppy.jpg" - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 21:15:45 +0000"}, - "Content-Type": {"image/jpeg"}, - "Content-Length": {"94328"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleList(c *check.C) { - method := "GET" - path := "/johnsmith/" - params := map[string][]string{ - "prefix": {"photos"}, - "max-keys": {"50"}, - "marker": {"puppy"}, - } - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:42:41 +0000"}, - "User-Agent": {"Mozilla/5.0"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleFetch(c *check.C) { - method := "GET" - path := "/johnsmith/" - params := map[string][]string{ - "acl": {""}, - } - headers := map[string][]string{ - "Host": {"johnsmith.s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 19:44:46 +0000"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleDelete(c *check.C) { - method := "DELETE" - path := "/johnsmith/photos/puppy.jpg" - params := map[string][]string{} - headers := map[string][]string{ - "Host": {"s3.amazonaws.com"}, - "Date": {"Tue, 27 Mar 2007 21:20:27 +0000"}, - "User-Agent": {"dotnet"}, - "x-amz-date": {"Tue, 27 
Mar 2007 21:20:26 +0000"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleUpload(c *check.C) { - method := "PUT" - path := "/static.johnsmith.net/db-backup.dat.gz" - params := map[string][]string{} - headers := map[string][]string{ - "Host": {"static.johnsmith.net:8080"}, - "Date": {"Tue, 27 Mar 2007 21:06:08 +0000"}, - "User-Agent": {"curl/7.15.5"}, - "x-amz-acl": {"public-read"}, - "content-type": {"application/x-download"}, - "Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="}, - "X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"}, - "X-Amz-Meta-FileChecksum": {"0x02661779"}, - "X-Amz-Meta-ChecksumAlgorithm": {"crc32"}, - "Content-Disposition": {"attachment; filename=database.dat"}, - "Content-Encoding": {"gzip"}, - "Content-Length": {"5913339"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleListAllMyBuckets(c *check.C) { - method := "GET" - path := "/" - headers := map[string][]string{ - "Host": {"s3.amazonaws.com"}, - "Date": {"Wed, 28 Mar 2007 01:29:59 +0000"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleUnicodeKeys(c *check.C) { - method := "GET" - path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re" - headers := map[string][]string{ - "Host": {"s3.amazonaws.com"}, - "Date": {"Wed, 28 Mar 2007 01:49:49 +0000"}, - } - s3.Sign(testAuth, method, path, nil, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} - -func (s *S) TestSignExampleCustomSSE(c *check.C) { - method := "GET" - path := "/secret/config" - params := map[string][]string{} - headers := map[string][]string{ - "Host": {"secret.johnsmith.net:8080"}, - "Date": {"Tue, 27 Mar 2007 21:06:08 +0000"}, - "x-amz-server-side-encryption-customer-key": {"MWJhakVna1dQT1B0SDFMeGtVVnRQRTFGaU1ldFJrU0I="}, - "x-amz-server-side-encryption-customer-key-MD5": {"glIqxpqQ4a9aoK/iLttKzQ=="}, - "x-amz-server-side-encryption-customer-algorithm": {"AES256"}, - } - s3.Sign(testAuth, method, path, params, headers) - expected := "AWS 0PN5J17HBGZHT7JJ3X82:Xq6PWmIo0aOWq+LDjCEiCGgbmHE=" - c.Assert(headers["Authorization"], check.DeepEquals, []string{expected}) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore deleted file mode 100644 index c4c1f537..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# Editor swap files -*.swp -*~ -.DS_Store diff --git 
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml deleted file mode 100644 index e6fabccb..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false - -language: go - -before_script: - - go get -u golang.org/x/tools/cmd/vet - - go get -u github.com/golang/lint/golint - -go: tip -script: - - test -z "$(gofmt -s -l -w management | tee /dev/stderr)" - - test -z "$(gofmt -s -l -w storage | tee /dev/stderr)" - - go build -v ./... - - go test -v ./storage/... -check.v - - test -z "$(golint ./storage/... | tee /dev/stderr)" - - go vet ./storage/... - - go test -v ./management/... - - test -z "$(golint ./management/... | grep -v 'should have comment' | grep -v 'stutters' | tee /dev/stderr)" - - go vet ./management/... diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md deleted file mode 100644 index 13d54857..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Microsoft Azure SDK for Go - -This project provides various Go packages to perform operations -on Microsoft Azure REST APIs. - -[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go) - -See list of implemented API clients [here](http://godoc.org/github.com/Azure/azure-sdk-for-go). - -> **NOTE:** This repository is under heavy ongoing development and -is likely to break over time. 
We currently do not have any releases -yet. If you are planning to use the repository, please consider vendoring -the packages in your project and update them when a stable tag is out. - -# Installation - - go get -d github.com/Azure/azure-sdk-for-go/management - -# Usage - -Read Godoc of the repository at: http://godoc.org/github.com/Azure/azure-sdk-for-go/ - -The client currently supports authentication to the Service Management -API with certificates or Azure `.publishSettings` file. You can -download the `.publishSettings` file for your subscriptions -[here](https://manage.windowsazure.com/publishsettings). - -### Example: Creating a Linux Virtual Machine - -```go -package main - -import ( - "encoding/base64" - "fmt" - - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/hostedservice" - "github.com/Azure/azure-sdk-for-go/management/virtualmachine" - "github.com/Azure/azure-sdk-for-go/management/vmutils" -) - -func main() { - dnsName := "test-vm-from-go" - storageAccount := "mystorageaccount" - location := "West US" - vmSize := "Small" - vmImage := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB" - userName := "testuser" - userPassword := "Test123" - - client, err := management.ClientFromPublishSettingsFile("path/to/downloaded.publishsettings", "") - if err != nil { - panic(err) - } - - // create hosted service - if err := hostedservice.NewClient(client).CreateHostedService(hostedservice.CreateHostedServiceParameters{ - ServiceName: dnsName, - Location: location, - Label: base64.StdEncoding.EncodeToString([]byte(dnsName))}); err != nil { - panic(err) - } - - // create virtual machine - role := vmutils.NewVMConfiguration(dnsName, vmSize) - vmutils.ConfigureDeploymentFromPlatformImage( - &role, - vmImage, - fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", storageAccount, dnsName), - "") - vmutils.ConfigureForLinux(&role, dnsName, userName, userPassword) - vmutils.ConfigureWithPublicSSH(&role) - - operationID, err := virtualmachine.NewClient(client). - CreateDeployment(role, dnsName, virtualmachine.CreateDeploymentOptions{}) - if err != nil { - panic(err) - } - if err := client.WaitForOperation(operationID, nil); err != nil { - panic(err) - } -} -``` - -# License - -This project is published under [Apache 2.0 License](LICENSE). diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go deleted file mode 100644 index a6e2eb8a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ /dev/null @@ -1,858 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client -} - -// A Container is an entry in ContainerListResponse. -type Container struct { - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - // TODO (ahmetalpbalkan) Metadata -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. 
-type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - // TODO (ahmetalpbalkan) remaining fields -} - -// ContainerListResponse contains the response fields from -// ListContainers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - -// A Blob is an entry in BlobListResponse. -type Blob struct { - Name string `xml:"Name"` - Properties BlobProperties `xml:"Properties"` - // TODO (ahmetalpbalkan) Metadata -} - -// BlobProperties contains various properties of a blob -// returned in various endpoints like ListBlobs or GetBlobProperties. -type BlobProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type"` - ContentEncoding string `xml:"Content-Encoding"` - BlobType BlobType `xml:"x-ms-blob-blob-type"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime string `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` -} - -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// BlobType defines the type of the Azure Blob. -type BlobType string - -// Types of blobs -const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" -) - -// PageWriteType defines the type of updates that are going to be -// done on the page blob. -type PageWriteType string - -// Types of operations on page blobs -const ( - PageWriteTypeUpdate PageWriteType = "update" - PageWriteTypeClear PageWriteType = "clear" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// BlockListType is used to filter out types of blocks in a Get Block List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. -type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 4 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockList call. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// GetPageRangesResponse contains the response fields from -// Get Page Ranges call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Page Ranges call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type PageRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -var ( - errBlobCopyAborted = errors.New("storage: blob copy is aborted") - errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") -) - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - var out ContainerListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// CreateContainer creates a blob container within the storage account -// with given name and access level. Returns an error if the container already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx -func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { - resp, err := b.createContainer(name, access) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CreateContainerIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. -func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { - resp, err := b.createContainer(name, access) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { - verb := "PUT" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - headers["Content-Length"] = "0" - if access != "" { - headers["x-ms-blob-public-access"] = string(access) - } - return b.client.exec(verb, uri, headers, nil) -} - -// ContainerExists returns true if a container with given name exists -// on the storage account, otherwise returns false. -func (b BlobStorageClient) ContainerExists(name string) (bool, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - headers := b.client.getStandardHeaders() - - resp, err := b.client.exec(verb, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// DeleteContainer deletes the container with given name on the storage -// account. If the container does not exist, it returns an error. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainer(name string) error { - resp, err := b.deleteContainer(name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteContainerIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { - resp, err := b.deleteContainer(name) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { - verb := "DELETE" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - return b.client.exec(verb, uri, headers, nil) -} - -// ListBlobs returns an object that contains the list of blobs in the container, -// a pagination token and other information in the response of the List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) - headers := b.client.getStandardHeaders() - - var out BlobListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// BlobExists returns true if a blob with given name exists on the specified -// container of the storage account. -func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetBlobURL gets the canonical URL to the blob with the specified name in the -// specified container. This method does not create a publicly accessible URL if -// the blob or container is private and this method does not check if the blob -// exists. -func (b BlobStorageClient) GetBlobURL(container, name string) string { - if container == "" { - container = "$root" - } - return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) -} - -// GetBlob returns a stream to read the blob. The caller must call Close() on the -// reader to close the underlying connection. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, "") - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - return resp.body, nil -} - -// GetBlobRange reads the specified range of a blob to a stream. The bytesRange -// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, bytesRange) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { - return nil, err - } - return resp.body, nil -} - -func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) { - verb := "GET" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - if bytesRange != "" { - headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) - } - resp, err := b.client.exec(verb, uri, headers, nil) - if err != nil { - return nil, err - } - return resp, err -} - -// GetBlobProperties provides various information about the specified -// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - var contentLength int64 - contentLengthStr := resp.headers.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return nil, err - } - } - - var sequenceNum int64 - sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") - if sequenceNumStr != "" { - sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) - if err != nil { - return nil, err - } - } - - return &BlobProperties{ - LastModified: resp.headers.Get("Last-Modified"), - Etag: resp.headers.Get("Etag"), - ContentMD5: resp.headers.Get("Content-MD5"), - ContentLength: contentLength, - ContentEncoding: resp.headers.Get("Content-Encoding"), - SequenceNumber: sequenceNum, - CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), - CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), - CopyID: resp.headers.Get("x-ms-copy-id"), - CopyProgress: resp.headers.Get("x-ms-copy-progress"), - CopySource: resp.headers.Get("x-ms-copy-source"), - CopyStatus: resp.headers.Get("x-ms-copy-status"), - BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), - }, nil -} - -// CreateBlockBlob initializes an empty block blob with no blocks. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlob(container, name string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", 0) - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk)) -} - -// PutBlockWithLength saves the given data stream of exactly the specified size to -// the block blob with given ID. It is an alternative to PutBlock where the data -// comes as a stream but the length is known in advance. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader) error { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", size) - - resp, err := b.client.exec("PUT", uri, headers, blob) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlockList saves a list of blocks to the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx -func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { - blockListXML := prepareBlockListRequest(blocks) - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) - headers := b.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - - resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetBlockList retrieves the list of blocks in the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { - params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - var out BlockListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - headers["Content-Length"] = fmt.Sprintf("%v", 0) - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutPage writes a range of pages to a page blob or clears the given range. -// In case of 'clear' writes, the given chunk is discarded. Ranges must be aligned -// to 512-byte boundaries and the chunk size must be a multiple of 512. -// -// See https://msdn.microsoft.com/en-us/library/ee691975.aspx -func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = string(writeType) - headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) - - var contentLength int64 - var data io.Reader - if writeType == PageWriteTypeClear { - contentLength = 0 - data = bytes.NewReader([]byte{}) - } else { - contentLength = int64(len(chunk)) - data = bytes.NewReader(chunk) - } - headers["Content-Length"] = fmt.Sprintf("%v", contentLength) - - resp, err := b.client.exec("PUT", uri, headers, data) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetPageRanges returns the list of valid page ranges for a page blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) - headers := b.client.getStandardHeaders() - - var out GetPageRangesResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// CopyBlob starts a blob copy operation and waits for the operation to -// complete. The sourceBlob parameter must be a canonical URL to the blob (it can be -// obtained using the GetBlobURL method). There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { - copyID, err := b.startBlobCopy(container, name, sourceBlob) - if err != nil { - return err - } - - return b.waitForBlobCopy(container, name, copyID) -} - -func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - headers["Content-Length"] = "0" - headers["x-ms-copy-source"] = sourceBlob - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return "", err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error { - for { - props, err := b.GetBlobProperties(container, name) - if err != nil { - return err - } - - if props.CopyID != copyID { - return errBlobCopyIDMismatch - } - - switch props.CopyStatus { - case blobCopyStatusSuccess: - return nil - case blobCopyStatusPending: - continue - case blobCopyStatusAborted: - return errBlobCopyAborted - case blobCopyStatusFailed: - return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) - default: - return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) - } - } -} - -// DeleteBlob deletes the given blob from the specified container. -// If the blob does not exist at the time of the Delete Blob operation, it -// returns an error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlob(container, name string) error { - resp, err := b.deleteBlob(container, name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteBlobIfExists deletes the given blob from the specified container. If the -// blob is deleted with this call, it returns true; otherwise it returns false. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) { - resp, err := b.deleteBlob(container, name) - if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) { - return resp.statusCode == http.StatusAccepted, nil - } - defer resp.body.Close() - return false, err -} - -func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) { - verb := "DELETE" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() - - return b.client.exec(verb, uri, headers, nil) -} - -// helper method to construct the path to a container given its name -func pathForContainer(name string) string { - return fmt.Sprintf("/%s", name) -} - -// helper method to construct the path to a blob given its container and blob -// name -func pathForBlob(container, name string) string { - return fmt.Sprintf("/%s/%s", container, name) -} - -// GetBlobSASURI creates a URL to the specified blob which contains the Shared -// Access Signature with the specified permissions and expiration time. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetBlobURL(container, name) - ) - canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) - if err != nil { - return "", err - } - signedExpiry := expiry.Format(time.RFC3339) - signedResource := "b" - - stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions) - if err != nil { - return "", err - } - - sig := b.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {b.client.apiVersion}, - "se": {signedExpiry}, - "sr": {signedResource}, - "sp": {signedPermissions}, - "sig": {sig}, - } - - sasURL, err := url.Parse(blobURL) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { - var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go deleted file mode 100644 index 14a2f6b2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go +++ /dev/null @@ -1,625 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "sort" - "sync" - "testing" - "time" - - chk "gopkg.in/check.v1" -) - -type StorageBlobSuite struct{} - -var _ = chk.Suite(&StorageBlobSuite{}) - -const testContainerPrefix = "zzzztest-" - -func getBlobClient(c *chk.C) BlobStorageClient { - return getBasicClient(c).GetBlobService() -} - -func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) { - c.Assert(pathForContainer("foo"), chk.Equals, "/foo") -} - -func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) { - c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob") -} - -func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) { - _, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP") - c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15 - - out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP") - c.Assert(err, chk.IsNil) - c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n") -} - -func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) { - api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) - c.Assert(err, chk.IsNil) - cli := api.GetBlobService() - expiry := time.Time{} - - expectedParts := url.URL{ - Scheme: "https", - Host: "foo.blob.core.windows.net", - Path: "container/name", - RawQuery: url.Values{ - "sv": {"2013-08-15"}, - "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, - "sr": {"b"}, - "sp": 
{"r"}, - "se": {"0001-01-01T00:00:00Z"}, - }.Encode()} - - u, err := cli.GetBlobSASURI("container", "name", expiry, "r") - c.Assert(err, chk.IsNil) - sasParts, err := url.Parse(u) - c.Assert(err, chk.IsNil) - c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) - c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) -} - -func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - blob := randString(20) - body := []byte(randString(100)) - expiry := time.Now().UTC().Add(time.Hour) - permissions := "r" - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.DeleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil) - - sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions) - c.Assert(err, chk.IsNil) - - resp, err := http.Get(sasURI) - c.Assert(err, chk.IsNil) - - blobResp, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - c.Assert(err, chk.IsNil) - - c.Assert(resp.StatusCode, chk.Equals, http.StatusOK) - c.Assert(len(blobResp), chk.Equals, len(body)) -} - -func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) { - cli := getBlobClient(c) - c.Assert(deleteTestContainers(cli), chk.IsNil) - - const n = 5 - const pageSize = 2 - - // Create test containers - created := []string{} - for i := 0; i < n; i++ { - name := randContainer() - c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil) - created = append(created, name) - } - sort.Strings(created) - - // Defer test container deletions - defer func() { - var wg sync.WaitGroup - for _, cnt := range created { - wg.Add(1) - go func(name string) { - c.Assert(cli.DeleteContainer(name), chk.IsNil) - wg.Done() - }(cnt) - } - wg.Wait() - }() - - // Paginate results - seen := []string{} - marker := "" - for { - resp, err := cli.ListContainers(ListContainersParameters{ - Prefix: testContainerPrefix, - MaxResults: pageSize, - Marker: marker}) - c.Assert(err, chk.IsNil) - - containers := resp.Containers - if len(containers) > pageSize { - c.Fatalf("Got a bigger page. 
Expected: %d, got: %d", pageSize, len(containers)) - } - - for _, c := range containers { - seen = append(seen, c.Name) - } - - marker = resp.NextMarker - if marker == "" || len(containers) == 0 { - break - } - } - - c.Assert(seen, chk.DeepEquals, created) -} - -func (s *StorageBlobSuite) TestContainerExists(c *chk.C) { - cnt := randContainer() - cli := getBlobClient(c) - ok, err := cli.ContainerExists(cnt) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) - defer cli.DeleteContainer(cnt) - - ok, err = cli.ContainerExists(cnt) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) -} - -func (s *StorageBlobSuite) TestCreateDeleteContainer(c *chk.C) { - cnt := randContainer() - cli := getBlobClient(c) - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - c.Assert(cli.DeleteContainer(cnt), chk.IsNil) -} - -func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) { - cnt := randContainer() - cli := getBlobClient(c) - - // First create - ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) - - // Second create, should not give errors - ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) - c.Assert(err, chk.IsNil) - defer cli.DeleteContainer(cnt) - c.Assert(ok, chk.Equals, false) -} - -func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) { - cnt := randContainer() - cli := getBlobClient(c) - - // Nonexisting container - c.Assert(cli.DeleteContainer(cnt), chk.NotNil) - - ok, err := cli.DeleteContainerIfExists(cnt) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) - - // Existing container - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - ok, err = cli.DeleteContainerIfExists(cnt) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) -} - -func (s *StorageBlobSuite) TestBlobExists(c *chk.C) { - cnt := randContainer() - blob := randString(20) - cli := getBlobClient(c) - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) - defer cli.DeleteContainer(cnt) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil) - defer cli.DeleteBlob(cnt, blob) - - ok, err := cli.BlobExists(cnt, blob+".foo") - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) - - ok, err = cli.BlobExists(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) -} - -func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) { - api, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - cli := api.GetBlobService() - - c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob") - c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob") - c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob") -} - -func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) { - if testing.Short() { - c.Skip("skipping blob copy in short mode, no SLA on async operation") - } - - cli := getBlobClient(c) - cnt := randContainer() - src := randString(20) - dst := randString(20) - body := []byte(randString(1024)) - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) - defer cli.DeleteBlob(cnt, src) - - c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), 
chk.IsNil) - defer cli.DeleteBlob(cnt, dst) - - blobBody, err := cli.GetBlob(cnt, dst) - c.Assert(err, chk.IsNil) - - b, err := ioutil.ReadAll(blobBody) - defer blobBody.Close() - c.Assert(err, chk.IsNil) - c.Assert(b, chk.DeepEquals, body) -} - -func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) { - cnt := randContainer() - blob := randString(20) - - cli := getBlobClient(c) - c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil) - - ok, err := cli.DeleteBlobIfExists(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) -} - -func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) { - cnt := randContainer() - blob := randString(20) - contents := randString(64) - - cli := getBlobClient(c) - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.DeleteContainer(cnt) - - // Nonexisting blob - _, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.NotNil) - - // Put the blob - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil) - - // Get blob properties - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - - c.Assert(props.ContentLength, chk.Equals, int64(len(contents))) - c.Assert(props.BlobType, chk.Equals, BlobTypeBlock) -} - -func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.DeleteContainer(cnt) - - blobs := []string{} - const n = 5 - const pageSize = 2 - for i := 0; i < n; i++ { - name := randString(20) - c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) - blobs = append(blobs, name) - } - sort.Strings(blobs) - - // Paginate - seen := []string{} - marker := "" - for { - resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ - MaxResults: pageSize, - Marker: marker}) - c.Assert(err, chk.IsNil) - - for _, v := range resp.Blobs { - seen = append(seen, v.Name) - } - - marker = resp.NextMarker - if marker == "" || len(resp.Blobs) == 0 { - break - } - } - - // Compare - c.Assert(seen, chk.DeepEquals, blobs) -} - -func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randString(20) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(props.ContentLength, chk.Not(chk.Equals), 0) -} - -func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) { - cnt := randContainer() - blob := randString(20) - body := "0123456789" - - cli := getBlobClient(c) - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) - defer cli.DeleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil) - defer cli.DeleteBlob(cnt, blob) - - // Read 1-3 - for _, r := range []struct { - rangeStr string - expected string - }{ - {"0-", body}, - {"1-3", body[1 : 3+1]}, - {"3-", body[3:]}, - } { - resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr) - c.Assert(err, chk.IsNil) - blobBody, err := ioutil.ReadAll(resp) - c.Assert(err, chk.IsNil) - - str := string(blobBody) - c.Assert(str, chk.Equals, r.expected) - } -} - -func (s *StorageBlobSuite) TestPutBlock(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - 
- blob := randString(20) - chunk := []byte(randString(1024)) - blockID := base64.StdEncoding.EncodeToString([]byte("foo")) - c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) -} - -func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randString(20) - chunk := []byte(randString(1024)) - blockID := base64.StdEncoding.EncodeToString([]byte("foo")) - - // Put one block - c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) - defer cli.deleteBlob(cnt, blob) - - // Get committed blocks - committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted) - c.Assert(err, chk.IsNil) - - if len(committed.CommittedBlocks) > 0 { - c.Fatal("There are committed blocks") - } - - // Get uncommitted blocks - uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted) - c.Assert(err, chk.IsNil) - - c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1) - // Commit block list - c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil) - - // Get all blocks - all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) - c.Assert(err, chk.IsNil) - c.Assert(len(all.CommittedBlocks), chk.Equals, 1) - c.Assert(len(all.UncommittedBlocks), chk.Equals, 0) - - // Verify the block - thatBlock := all.CommittedBlocks[0] - c.Assert(thatBlock.Name, chk.Equals, blockID) - c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk))) -} - -func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randString(20) - c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) - - // Verify - blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) - c.Assert(err, chk.IsNil) - c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0) - c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0) -} - -func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randString(20) - size := int64(10 * 1024 * 1024) - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) - - // Verify - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(props.ContentLength, chk.Equals, size) - c.Assert(props.BlobType, chk.Equals, BlobTypePage) -} - -func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randString(20) - size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) - - chunk1 := []byte(randString(1024)) - chunk2 := []byte(randString(512)) - - // Append chunks - c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil) - c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2), chk.IsNil) - - // Verify contents - out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err := ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) 
- c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) - out.Close() - - // Overwrite first half of chunk1 - chunk0 := []byte(randString(512)) - c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil) - - // Verify contents - out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err = ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...)) -} - -func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randString(20) - size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) - - // Put 0-2047 - chunk := []byte(randString(2048)) - c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil) - - // Clear 512-1023 - c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil) - - // Verify contents - out, err := cli.GetBlobRange(cnt, blob, "0-2047") - c.Assert(err, chk.IsNil) - contents, err := ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - defer out.Close() - c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...)) -} - -func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randString(20) - size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) - - // Get page ranges on empty blob - out, err := cli.GetPageRanges(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(len(out.PageList), chk.Equals, 0) - - // Add 0-512 page - c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil) - - out, err = cli.GetPageRanges(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(len(out.PageList), chk.Equals, 1) - - // Add 1024-2048 - c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil) - - out, err = cli.GetPageRanges(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(len(out.PageList), chk.Equals, 2) -} - -func deleteTestContainers(cli BlobStorageClient) error { - for { - resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix}) - if err != nil { - return err - } - if len(resp.Containers) == 0 { - break - } - for _, c := range resp.Containers { - err = cli.DeleteContainer(c.Name) - if err != nil { - return err - } - } - } - return nil -} - -func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error { - if len(chunk) > MaxBlobBlockSize { - return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize) - } - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) - if err != nil { - return err - } - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - 
-func randContainer() string { - return testContainerPrefix + randString(32-len(testContainerPrefix)) -} - -func randString(n int) string { - if n <= 0 { - panic("negative number") - } - const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz" - var bytes = make([]byte, n) - rand.Read(bytes) - for i, b := range bytes { - bytes[i] = alphanum[b%byte(len(alphanum))] - } - return string(bytes) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go deleted file mode 100644 index 6c171050..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go +++ /dev/null @@ -1,359 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -import ( - "bytes" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "sort" - "strings" -) - -const ( - // DefaultBaseURL is the domain name used for storage requests when a - // default client is created. - DefaultBaseURL = "core.windows.net" - - // DefaultAPIVersion is the Azure Storage API version string used when a - // basic client is created. - DefaultAPIVersion = "2014-02-14" - - defaultUseHTTPS = true - - blobServiceName = "blob" - tableServiceName = "table" - queueServiceName = "queue" -) - -// Client is the object that needs to be constructed to perform -// operations on the storage account. -type Client struct { - accountName string - accountKey []byte - useHTTPS bool - baseURL string - apiVersion string -} - -type storageResponse struct { - statusCode int - headers http.Header - body io.ReadCloser -} - -// AzureStorageServiceError contains fields of the error response from -// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx -// Some fields might be specific to certain calls. -type AzureStorageServiceError struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` - QueryParameterName string `xml:"QueryParameterName"` - QueryParameterValue string `xml:"QueryParameterValue"` - Reason string `xml:"Reason"` - StatusCode int - RequestID string -} - -// UnexpectedStatusCodeError is returned when a storage service responds with neither an error -// nor with an HTTP status code indicating success. -type UnexpectedStatusCodeError struct { - allowed []int - got int -} - -func (e UnexpectedStatusCodeError) Error() string { - s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } - - got := s(e.got) - expected := []string{} - for _, v := range e.allowed { - expected = append(expected, s(v)) - } - return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) -} - -// NewBasicClient constructs a Client with the given storage account name and -// key. -func NewBasicClient(accountName, accountKey string) (Client, error) { - return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) -} - -// NewClient constructs a Client. This should be used if the caller wants -// to specify whether to use HTTPS, a specific REST API version or a custom -// storage endpoint other than the Azure Public Cloud. 
-func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { - var c Client - if accountName == "" { - return c, fmt.Errorf("azure: account name required") - } else if accountKey == "" { - return c, fmt.Errorf("azure: account key required") - } else if blobServiceBaseURL == "" { - return c, fmt.Errorf("azure: base storage service url required") - } - - key, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return c, err - } - - return Client{ - accountName: accountName, - accountKey: key, - useHTTPS: useHTTPS, - baseURL: blobServiceBaseURL, - apiVersion: apiVersion, - }, nil -} - -func (c Client) getBaseURL(service string) string { - scheme := "http" - if c.useHTTPS { - scheme = "https" - } - - host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) - - u := &url.URL{ - Scheme: scheme, - Host: host} - return u.String() -} - -func (c Client) getEndpoint(service, path string, params url.Values) string { - u, err := url.Parse(c.getBaseURL(service)) - if err != nil { - // really should not be happening - panic(err) - } - - if path == "" { - path = "/" // API doesn't accept path segments not starting with '/' - } - - u.Path = path - u.RawQuery = params.Encode() - return u.String() -} - -// GetBlobService returns a BlobStorageClient which can operate on the blob -// service of the storage account. -func (c Client) GetBlobService() BlobStorageClient { - return BlobStorageClient{c} -} - -// GetQueueService returns a QueueServiceClient which can operate on the queue -// service of the storage account. -func (c Client) GetQueueService() QueueServiceClient { - return QueueServiceClient{c} -} - -func (c Client) createAuthorizationHeader(canonicalizedString string) string { - signature := c.computeHmac256(canonicalizedString) - return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature) -} - -func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { - canonicalizedResource, err := c.buildCanonicalizedResource(url) - if err != nil { - return "", err - } - - canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) - return c.createAuthorizationHeader(canonicalizedString), nil -} - -func (c Client) getStandardHeaders() map[string]string { - return map[string]string{ - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), - } -} - -func (c Client) buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - match, _ := regexp.MatchString("x-ms-", headerName) - if match { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := "" - - for i, key := range keys { - if i == len(keys)-1 { - ch += fmt.Sprintf("%s:%s", key, cm[key]) - } else { - ch += fmt.Sprintf("%s:%s\n", key, cm[key]) - } - } - return ch -} - -func (c Client) buildCanonicalizedResource(uri string) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := "/" + c.accountName - if len(u.Path) > 0 { - cr += u.Path - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - if len(params) > 0 { - cr += "\n" - keys := make([]string, 0, 
len(params)) - for key := range params { - keys = append(keys, key) - } - - sort.Strings(keys) - - for i, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - if i == len(keys)-1 { - cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) - } else { - cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) - } - } - } - return cr, nil -} - -func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { - canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", - verb, - headers["Content-Encoding"], - headers["Content-Language"], - headers["Content-Length"], - headers["Content-MD5"], - headers["Content-Type"], - headers["Date"], - headers["If-Modified-Since"], - headers["If-Match"], - headers["If-None-Match"], - headers["If-Unmodified-Since"], - headers["Range"], - c.buildCanonicalizedHeader(headers), - canonicalizedResource) - - return canonicalizedString -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { - authHeader, err := c.getAuthorizationHeader(verb, url, headers) - if err != nil { - return nil, err - } - headers["Authorization"] = authHeader - - req, err := http.NewRequest(verb, url, body) - if err != nil { - return nil, err - } - for k, v := range headers { - req.Header.Add(k, v) - } - httpClient := http.Client{} - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readResponseBody(resp) - if err != nil { - return nil, err - } - - if len(respBody) == 0 { - // no error in response body - err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status) - } else { - // response contains storage service error object, unmarshal - storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) - if errIn != nil { // error unmarshaling the error response - err = errIn - } else { - err = storageErr - } - } - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ - }, err - } - - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: resp.Body}, nil -} - -func readResponseBody(resp *http.Response) ([]byte, error) { - defer resp.Body.Close() - out, err := ioutil.ReadAll(resp.Body) - if err == io.EOF { - err = nil - } - return out, err -} - -func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { - var storageErr AzureStorageServiceError - if err := xml.Unmarshal(body, &storageErr); err != nil { - return storageErr, err - } - storageErr.StatusCode = statusCode - storageErr.RequestID = requestID - return storageErr, nil -} - -func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID) -} - -// checkRespCode returns UnexpectedStatusCodeError if the given response code is not -// one of the allowed status codes; otherwise nil.
-func checkRespCode(respCode int, allowed []int) error { - for _, v := range allowed { - if respCode == v { - return nil - } - } - return UnexpectedStatusCodeError{allowed, respCode} -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go deleted file mode 100644 index 5bc52110..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package storage - -import ( - "encoding/base64" - "net/url" - "os" - "testing" - - chk "gopkg.in/check.v1" -) - -// Hook up gocheck to testing -func Test(t *testing.T) { chk.TestingT(t) } - -type StorageClientSuite struct{} - -var _ = chk.Suite(&StorageClientSuite{}) - -// getBasicClient returns a test client from storage credentials in the env -func getBasicClient(c *chk.C) Client { - name := os.Getenv("ACCOUNT_NAME") - if name == "" { - c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test") - } - key := os.Getenv("ACCOUNT_KEY") - if key == "" { - c.Fatal("ACCOUNT_KEY not set") - } - cli, err := NewBasicClient(name, key) - c.Assert(err, chk.IsNil) - return cli -} - -func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion) - c.Assert(err, chk.IsNil) - c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net") -} - -func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) { - apiVersion := "2015-01-01" // a non existing one - cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false) - c.Assert(err, chk.IsNil) - c.Assert(cli.apiVersion, chk.Equals, apiVersion) - c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn") -} - -func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - output := cli.getEndpoint(blobServiceName, "", url.Values{}) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/") -} - -func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - output := cli.getEndpoint(blobServiceName, "path", url.Values{}) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path") -} - -func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - params := url.Values{} - params.Set("a", "b") - params.Set("c", "d") - output := cli.getEndpoint(blobServiceName, "", params) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d") -} - -func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - params := url.Values{} - params.Set("a", "b") - params.Set("c", "d") - output := cli.getEndpoint(blobServiceName, "path", params) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d") -} - -func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - - headers := cli.getStandardHeaders() - c.Assert(len(headers), chk.Equals, 2) - c.Assert(headers["x-ms-version"], chk.Equals, 
cli.apiVersion) - if _, ok := headers["x-ms-date"]; !ok { - c.Fatal("Missing date header") - } -} - -func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - - type test struct{ url, expected string } - tests := []test{ - {"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"}, - {"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"}, - {"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"}, - } - - for _, i := range tests { - out, err := cli.buildCanonicalizedResource(i.url) - c.Assert(err, chk.IsNil) - c.Assert(out, chk.Equals, i.expected) - } -} - -func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - - type test struct { - headers map[string]string - expected string - } - tests := []test{ - {map[string]string{}, ""}, - {map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"}, - {map[string]string{"foo:": "bar"}, ""}, - {map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"}, - {map[string]string{ - "x-ms-version": "9999-99-99", - "x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}} - - for _, i := range tests { - c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected) - } -} - -func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) { - // attempt to delete a nonexisting container - _, err := getBlobClient(c).deleteContainer(randContainer()) - c.Assert(err, chk.NotNil) - - v, ok := err.(AzureStorageServiceError) - c.Check(ok, chk.Equals, true) - c.Assert(v.StatusCode, chk.Equals, 404) - c.Assert(v.Code, chk.Equals, "ContainerNotFound") - c.Assert(v.Code, chk.Not(chk.Equals), "") -} - -func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) { - key := base64.StdEncoding.EncodeToString([]byte("bar")) - cli, err := NewBasicClient("foo", key) - c.Assert(err, chk.IsNil) - - canonicalizedString := `foobarzoo` - expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=` - c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go deleted file mode 100644 index fa017f4c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ /dev/null @@ -1,230 +0,0 @@ -package storage - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" -) - -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. -type QueueServiceClient struct { - client Client -} - -func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } - func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } - func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` -} - -// PutMessageParameters is the set of options that can be specified for a Put Message -// operation. A zero struct does not use any preferences for the request.
-type PutMessageParameters struct { - VisibilityTimeout int - MessageTTL int -} - -func (p PutMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - if p.MessageTTL != 0 { - out.Set("messagettl", strconv.Itoa(p.MessageTTL)) - } - return out -} - -// GetMessagesParameters is the set of options that can be specified for a Get -// Messages operation. A zero struct does not use any preferences for the -// request. -type GetMessagesParameters struct { - NumOfMessages int - VisibilityTimeout int -} - -func (p GetMessagesParameters) getParameters() url.Values { - out := url.Values{} - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out -} - -// PeekMessagesParameters is the set of options that can be specified for a Peek -// Messages operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesParameters struct { - NumOfMessages int -} - -func (p PeekMessagesParameters) getParameters() url.Values { - out := url.Values{"peekonly": {"true"}} // Required for peek operation - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - return out -} - -// GetMessagesResponse represents a response returned from a Get Messages -// operation. -type GetMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` -} - -// GetMessageResponse represents a QueueMessage object returned from a Get -// Messages operation response. -type GetMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible string `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// PeekMessagesResponse represents a response returned from a Peek Messages -// operation. -type PeekMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` -} - -// PeekMessageResponse represents a QueueMessage object returned from a Peek -// Messages operation response. -type PeekMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// CreateQueue operation creates a queue under the given account. - -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx -func (c QueueServiceClient) CreateQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - headers := c.client.getStandardHeaders() - headers["Content-Length"] = "0" - resp, err := c.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// DeleteQueue operation permanently deletes the specified queue.
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx -func (c QueueServiceClient) DeleteQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// QueueExists returns true if a queue with given name exists. -func (c QueueServiceClient) QueueExists(name string) (bool, error) { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { - return resp.statusCode == http.StatusOK, nil - } - - return false, err -} - -// PutMessage operation adds a new message to the back of the message queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx -func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(nn) - resp, err := c.client.exec("POST", uri, headers, body) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// ClearMessages operation deletes all messages from the specified queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx -func (c QueueServiceClient) ClearMessages(queue string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// GetMessages operation retrieves one or more messages from the front of the -// queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx -func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { - var r GetMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return r, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} - -// PeekMessages retrieves one or more messages from the front of the queue, but -// does not alter the visibility of the message. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx -func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { - var r PeekMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return r, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} - -// DeleteMessage operation deletes the specified message. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ - "popreceipt": {popReceipt}}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go deleted file mode 100644 index 5c7bad93..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package storage - -import ( - chk "gopkg.in/check.v1" -) - -type StorageQueueSuite struct{} - -var _ = chk.Suite(&StorageQueueSuite{}) - -func getQueueClient(c *chk.C) QueueServiceClient { - return getBasicClient(c).GetQueueService() -} - -func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) { - c.Assert(pathForQueue("q"), chk.Equals, "/q") -} - -func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) { - c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages") -} - -func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) { - c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m") -} - -func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) { - cli := getQueueClient(c) - name := randString(20) - c.Assert(cli.CreateQueue(name), chk.IsNil) - c.Assert(cli.DeleteQueue(name), chk.IsNil) -} - -func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { - cli := getQueueClient(c) - ok, err := cli.QueueExists("nonexistent-queue") - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) - - name := randString(20) - c.Assert(cli.CreateQueue(name), chk.IsNil) - defer cli.DeleteQueue(name) - - ok, err = cli.QueueExists(name) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) -} - -func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) { - q := randString(20) - cli := getQueueClient(c) - c.Assert(cli.CreateQueue(q), chk.IsNil) - defer cli.DeleteQueue(q) - - msg := randString(64 * 1024) // exercise max length - c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil) - r, err := cli.PeekMessages(q, PeekMessagesParameters{}) - c.Assert(err, chk.IsNil) - c.Assert(len(r.QueueMessagesList), chk.Equals, 1) - c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg) -} - -func (s *StorageQueueSuite) TestGetMessages(c *chk.C) { - q := randString(20) - cli := getQueueClient(c) - c.Assert(cli.CreateQueue(q), chk.IsNil) - defer cli.DeleteQueue(q) - - n := 4 - for i := 0; i < n; i++ { - c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil) - } - - r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n}) - c.Assert(err, chk.IsNil) - c.Assert(len(r.QueueMessagesList), chk.Equals, n) -} - -func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) { - q := randString(20) - cli := getQueueClient(c) - c.Assert(cli.CreateQueue(q), chk.IsNil) - defer cli.DeleteQueue(q) - - c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil) - r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1}) - c.Assert(err, chk.IsNil) - 
c.Assert(len(r.QueueMessagesList), chk.Equals, 1) - m := r.QueueMessagesList[0] - c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go deleted file mode 100644 index 33155af7..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go +++ /dev/null @@ -1,71 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "time" -) - -func (c Client) computeHmac256(message string) string { - h := hmac.New(sha256.New, c.accountKey) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func currentTimeRfc1123Formatted() string { - return timeRfc1123Formatted(time.Now().UTC()) -} - -func timeRfc1123Formatted(t time.Time) string { - return t.Format(http.TimeFormat) -} - -func mergeParams(v1, v2 url.Values) url.Values { - out := url.Values{} - for k, v := range v1 { - out[k] = v - } - for k, v := range v2 { - vals, ok := out[k] - if ok { - vals = append(vals, v...) - out[k] = vals - } else { - out[k] = v - } - } - return out -} - -func prepareBlockListRequest(blocks []Block) string { - s := `<?xml version="1.0" encoding="utf-8"?><BlockList>` - for _, v := range blocks { - s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status) - } - s += `</BlockList>` - return s -} - -func xmlUnmarshal(body io.Reader, v interface{}) error { - data, err := ioutil.ReadAll(body) - if err != nil { - return err - } - return xml.Unmarshal(data, v) -} - -func xmlMarshal(v interface{}) (io.Reader, int, error) { - b, err := xml.Marshal(v) - if err != nil { - return nil, 0, err - } - return bytes.NewReader(b), len(b), nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go deleted file mode 100644 index 9bf82dcc..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package storage - -import ( - "encoding/xml" - "io/ioutil" - "net/url" - "strings" - "time" - - chk "gopkg.in/check.v1" -) - -func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) { - now := time.Now().UTC() - expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT" - c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout)) -} - -func (s *StorageClientSuite) Test_mergeParams(c *chk.C) { - v1 := url.Values{ - "k1": {"v1"}, - "k2": {"v2"}} - v2 := url.Values{ - "k1": {"v11"}, - "k3": {"v3"}} - out := mergeParams(v1, v2) - c.Assert(out.Get("k1"), chk.Equals, "v1") - c.Assert(out.Get("k2"), chk.Equals, "v2") - c.Assert(out.Get("k3"), chk.Equals, "v3") - c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"}) -} - -func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) { - empty := []Block{} - expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>` - c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected) - - blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}} - expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>` - c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected) -} - -func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) { - xml := `<?xml version="1.0" encoding="utf-8"?> - <Blob> - <Name>myblob</Name> - </Blob>`
- var blob Blob - body := ioutil.NopCloser(strings.NewReader(xml)) - c.Assert(xmlUnmarshal(body, &blob), chk.IsNil) - c.Assert(blob.Name, chk.Equals, "myblob") -} - -func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) { - type t struct { - XMLName xml.Name `xml:"S"` - Name string `xml:"Name"` - } - - b := t{Name: "myblob"} - expected := `<S><Name>myblob</Name></S>` - r, i, err := xmlMarshal(b) - c.Assert(err, chk.IsNil) - o, err := ioutil.ReadAll(r) - c.Assert(err, chk.IsNil) - out := string(o) - c.Assert(out, chk.Equals, expected) - c.Assert(i, chk.Equals, len(expected)) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index 2d8c0866..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - tip -install: - - go get -t ./... diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index eb72bff9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,7 +0,0 @@ -# 0.7.3 - -formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE.
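The SharedKey signing that the deleted `client.go` and `util.go` implement above is easy to sanity-check in isolation: decode the base64 account key, HMAC-SHA256 the canonicalized string, and base64-encode the digest. Here is a minimal standalone sketch, using only the standard library; the `sign` helper is a hypothetical stand-in for the `computeHmac256`/`createAuthorizationHeader` pair, not part of the SDK:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// sign mirrors the deleted computeHmac256/createAuthorizationHeader pair:
// HMAC-SHA256 the canonicalized string with the decoded account key, then
// base64-encode the digest into a "SharedKey account:signature" header value.
func sign(account, encodedKey, canonicalized string) (string, error) {
	key, err := base64.StdEncoding.DecodeString(encodedKey)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(canonicalized))
	sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))
	return fmt.Sprintf("SharedKey %s:%s", account, sig), nil
}

func main() {
	// Same inputs as Test_createAuthorizationHeader above: account "foo",
	// key base64("bar") = "YmFy", canonicalized string "foobarzoo".
	header, err := sign("foo", "YmFy", "foobarzoo")
	if err != nil {
		panic(err)
	}
	fmt.Println(header)
	// Expected, per the test:
	// SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=
}
```

Running this prints the same header that `Test_createAuthorizationHeader` asserts above, which is a quick way to confirm the canonicalization and signing steps independently of any live storage account.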
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index d55f9092..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,349 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. 
You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus, using any of the `printf`-family functions should be -seen as a hint that you should add a field; however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example, to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD, or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/).
Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` - - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. 
For example, if your application has a global -variable `Environment`, which is a string representation of the environment, you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY, set the - `DisableColors` field to `true`. -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). - - ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"}) - ``` - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see the Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger.
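The hooks described in the Hooks section above are plain two-method implementations: the `Hook` interface, as exercised by `TestHook` in `hook_test.go` further below, needs only `Fire(*Entry) error` and `Levels() []Level`. A minimal sketch of a custom hook might look like the following; `ErrorCountHook` is a hypothetical example, not part of the vendored code:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// ErrorCountHook counts how many error-or-worse entries have been logged.
type ErrorCountHook struct {
	Count int // not synchronized; a real hook would guard this with a mutex
}

// Fire is called once for every entry whose level is listed in Levels.
func (h *ErrorCountHook) Fire(entry *log.Entry) error {
	h.Count++
	return nil
}

// Levels restricts the hook to Error, Fatal and Panic entries.
func (h *ErrorCountHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

func main() {
	hook := &ErrorCountHook{}
	log.AddHook(hook)

	log.Info("not counted")  // Info is not in Levels, so Fire never runs
	log.Error("counted")     // increments the counter
	log.WithField("errors", hook.Count).Info("errors so far") // errors=1
}
```

Because `Levels` omits the lower severities, the `Info` calls never reach `Fire`; this is the same filtering that the built-in hooks in the table above rely on.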
- - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 17fe6f70..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,252 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) 
-} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go deleted file mode 100644 index 98717df4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go deleted file mode 100644 index a1623ec0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) // default - log.Level = logrus.DebugLevel -} - -func main() { - defer func() { - err := recover() - if err != nil { - log.WithFields(logrus.Fields{ - "omg": true, - "err": err, - "number": 100, - }).Fatal("The ice breaks!") - } - }() - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "number": 8, - }).Debug("Started observing beach") - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "temperature": -4, - }).Debug("Temperature changes") - - log.WithFields(logrus.Fields{ - "animal": "orca", - "size": 9009, - }).Panic("It's over 9000!") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go deleted file mode 100644 index cb5759a3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.TextFormatter) // default - 
log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development")) -} - -func main() { - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b80..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. 
-func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. 
Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index 77989da6..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package logrus - -import ( - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go deleted file mode 100644 index 8ea93ddf..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go +++ /dev/null @@ -1,56 +0,0 @@ -package logstash - -import ( - "encoding/json" - "fmt" - - "github.com/Sirupsen/logrus" -) - -// Formatter generates json in logstash format. -// Logstash site: http://logstash.net/ -type LogstashFormatter struct { - Type string // if not empty use for logstash type field. 
- - // TimestampFormat sets the format used for timestamps. - TimestampFormat string -} - -func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { - entry.Data["@version"] = 1 - - if f.TimestampFormat == "" { - f.TimestampFormat = logrus.DefaultTimestampFormat - } - - entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat) - - // set message field - v, ok := entry.Data["message"] - if ok { - entry.Data["fields.message"] = v - } - entry.Data["message"] = entry.Message - - // set level field - v, ok = entry.Data["level"] - if ok { - entry.Data["fields.level"] = v - } - entry.Data["level"] = entry.Level.String() - - // set type field - if f.Type != "" { - v, ok = entry.Data["type"] - if ok { - entry.Data["fields.type"] = v - } - entry.Data["type"] = f.Type - } - - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go deleted file mode 100644 index d8814a0e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package logstash - -import ( - "bytes" - "encoding/json" - "github.com/Sirupsen/logrus" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestLogstashFormatter(t *testing.T) { - assert := assert.New(t) - - lf := LogstashFormatter{Type: "abc"} - - fields := logrus.Fields{ - "message": "def", - "level": "ijk", - "type": "lmn", - "one": 1, - "pi": 3.14, - "bool": true, - } - - entry := logrus.WithFields(fields) - entry.Message = "msg" - entry.Level = logrus.InfoLevel - - b, _ := lf.Format(entry) - - var data map[string]interface{} - dec := json.NewDecoder(bytes.NewReader(b)) - dec.UseNumber() - dec.Decode(&data) - - // base fields - assert.Equal(json.Number("1"), data["@version"]) - assert.NotEmpty(data["@timestamp"]) - assert.Equal("abc", data["type"]) - assert.Equal("msg", data["message"]) - assert.Equal("info", data["level"]) - - // substituted fields - assert.Equal("def", data["fields.message"]) - assert.Equal("ijk", data["fields.level"]) - assert.Equal("lmn", data["fields.type"]) - - // formats - assert.Equal(json.Number("1"), data["one"]) - assert.Equal(json.Number("3.14"), data["pi"]) - assert.Equal(true, data["bool"]) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 13f34cb6..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - 
log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 0da2b365..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type levelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks levelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. 
-func (hooks levelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go deleted file mode 100644 index b0502c33..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go +++ /dev/null @@ -1,54 +0,0 @@ -package airbrake - -import ( - "errors" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/tobi/airbrake-go" -) - -// AirbrakeHook to send exceptions to an exception-tracking service compatible -// with the Airbrake API. -type airbrakeHook struct { - APIKey string - Endpoint string - Environment string -} - -func NewHook(endpoint, apiKey, env string) *airbrakeHook { - return &airbrakeHook{ - APIKey: apiKey, - Endpoint: endpoint, - Environment: env, - } -} - -func (hook *airbrakeHook) Fire(entry *logrus.Entry) error { - airbrake.ApiKey = hook.APIKey - airbrake.Endpoint = hook.Endpoint - airbrake.Environment = hook.Environment - - var notifyErr error - err, ok := entry.Data["error"].(error) - if ok { - notifyErr = err - } else { - notifyErr = errors.New(entry.Message) - } - - airErr := airbrake.Notify(notifyErr) - if airErr != nil { - return fmt.Errorf("Failed to send error to Airbrake: %s", airErr) - } - - return nil -} - -func (hook *airbrakeHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go deleted file mode 100644 index 058a91e3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package airbrake - -import ( - "encoding/xml" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Sirupsen/logrus" -) - -type notice struct { - Error NoticeError `xml:"error"` -} -type NoticeError struct { - Class string `xml:"class"` - Message string `xml:"message"` -} - -type customErr struct { - msg string -} - -func (e *customErr) Error() string { - return e.msg -} - -const ( - testAPIKey = "abcxyz" - testEnv = "development" - expectedClass = "*airbrake.customErr" - expectedMsg = "foo" - unintendedMsg = "Airbrake will not see this string" -) - -var ( - noticeError = make(chan NoticeError, 1) -) - -// TestLogEntryMessageReceived checks if invoking Logrus' log.Error -// method causes an XML payload containing the log entry message is received -// by a HTTP server emulating an Airbrake-compatible endpoint. 
-func TestLogEntryMessageReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.Error(expectedMsg) - - select { - case received := <-noticeError: - if received.Message != expectedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -// TestLogEntryMessageReceived confirms that, when passing an error type using -// logrus.Fields, a HTTP server emulating an Airbrake endpoint receives the -// error message returned by the Error() method on the error interface -// rather than the logrus.Entry.Message string. -func TestLogEntryWithErrorReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": &customErr{expectedMsg}, - }).Error(unintendedMsg) - - select { - case received := <-noticeError: - if received.Message != expectedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - if received.Class != expectedClass { - t.Errorf("Unexpected error class: %s", received.Class) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a -// non-error type using logrus.Fields, a HTTP server emulating an Airbrake -// endpoint receives the logrus.Entry.Message string. -// -// Only error types are supported when setting the 'error' field using -// logrus.WithFields(). -func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": expectedMsg, - }).Error(unintendedMsg) - - select { - case received := <-noticeError: - if received.Message != unintendedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -func startAirbrakeServer(t *testing.T) *httptest.Server { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var notice notice - if err := xml.NewDecoder(r.Body).Decode(¬ice); err != nil { - t.Error(err) - } - r.Body.Close() - - noticeError <- notice.Error - })) - - return ts -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go deleted file mode 100644 index d20a0f54..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go +++ /dev/null @@ -1,68 +0,0 @@ -package logrus_bugsnag - -import ( - "errors" - - "github.com/Sirupsen/logrus" - "github.com/bugsnag/bugsnag-go" -) - -type bugsnagHook struct{} - -// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before -// bugsnag.Configure. Bugsnag must be configured before the hook. -var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook") - -// ErrBugsnagSendFailed indicates that the hook failed to submit an error to -// bugsnag. 
The error was successfully generated, but `bugsnag.Notify()` -// failed. -type ErrBugsnagSendFailed struct { - err error -} - -func (e ErrBugsnagSendFailed) Error() string { - return "failed to send error to Bugsnag: " + e.err.Error() -} - -// NewBugsnagHook initializes a logrus hook which sends exceptions to an -// exception-tracking service compatible with the Bugsnag API. Before using -// this hook, you must call bugsnag.Configure(). The returned object should be -// registered with a log via `AddHook()` -// -// Entries that trigger an Error, Fatal or Panic should now include an "error" -// field to send to Bugsnag. -func NewBugsnagHook() (*bugsnagHook, error) { - if bugsnag.Config.APIKey == "" { - return nil, ErrBugsnagUnconfigured - } - return &bugsnagHook{}, nil -} - -// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the -// "error" field (or the Message if the error isn't present) and sends it off. -func (hook *bugsnagHook) Fire(entry *logrus.Entry) error { - var notifyErr error - err, ok := entry.Data["error"].(error) - if ok { - notifyErr = err - } else { - notifyErr = errors.New(entry.Message) - } - - bugsnagErr := bugsnag.Notify(notifyErr) - if bugsnagErr != nil { - return ErrBugsnagSendFailed{bugsnagErr} - } - - return nil -} - -// Levels enumerates the log levels on which the error should be forwarded to -// bugsnag: everything at or above the "Error" level. -func (hook *bugsnagHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go deleted file mode 100644 index e9ea298d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package logrus_bugsnag - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Sirupsen/logrus" - "github.com/bugsnag/bugsnag-go" -) - -type notice struct { - Events []struct { - Exceptions []struct { - Message string `json:"message"` - } `json:"exceptions"` - } `json:"events"` -} - -func TestNoticeReceived(t *testing.T) { - msg := make(chan string, 1) - expectedMsg := "foo" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var notice notice - data, _ := ioutil.ReadAll(r.Body) - if err := json.Unmarshal(data, ¬ice); err != nil { - t.Error(err) - } - _ = r.Body.Close() - - msg <- notice.Events[0].Exceptions[0].Message - })) - defer ts.Close() - - hook := &bugsnagHook{} - - bugsnag.Configure(bugsnag.Configuration{ - Endpoint: ts.URL, - ReleaseStage: "production", - APIKey: "12345678901234567890123456789012", - Synchronous: true, - }) - - log := logrus.New() - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": errors.New(expectedMsg), - }).Error("Bugsnag will not see this string") - - select { - case received := <-msg: - if received != expectedMsg { - t.Errorf("Unexpected message received: %s", received) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Bugsnag API") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md deleted file mode 100644 index ae61e922..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Papertrail Hook for Logrus :walrus: - -[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts). - -In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible. - -## Usage - -You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`. - -For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs. - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/papertrail" -) - -func main() { - log := logrus.New() - hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go deleted file mode 100644 index c0f10c1b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go +++ /dev/null @@ -1,55 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "net" - "os" - "time" - - "github.com/Sirupsen/logrus" -) - -const ( - format = "Jan 2 15:04:05" -) - -// PapertrailHook to send logs to a logging service compatible with the Papertrail API. -type PapertrailHook struct { - Host string - Port int - AppName string - UDPConn net.Conn -} - -// NewPapertrailHook creates a hook to be added to an instance of logger. -func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) { - conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port)) - return &PapertrailHook{host, port, appName, conn}, err -} - -// Fire is called when a log event is fired. -func (hook *PapertrailHook) Fire(entry *logrus.Entry) error { - date := time.Now().Format(format) - msg, _ := entry.String() - payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg) - - bytesWritten, err := hook.UDPConn.Write([]byte(payload)) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err) - return err - } - - return nil -} - -// Levels returns the available logging levels. 
-func (hook *PapertrailHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go deleted file mode 100644 index 96318d00..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/stvp/go-udp-testing" -) - -func TestWritingToUDP(t *testing.T) { - port := 16661 - udp.SetAddr(fmt.Sprintf(":%d", port)) - - hook, err := NewPapertrailHook("localhost", port, "test") - if err != nil { - t.Errorf("Unable to connect to local UDP server.") - } - - log := logrus.New() - log.Hooks.Add(hook) - - udp.ShouldReceive(t, "foo", func() { - log.Info("foo") - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md deleted file mode 100644 index 19e58bb4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# Sentry Hook for Logrus :walrus: - -[Sentry](https://getsentry.com) provides both self-hosted and hosted -solutions for exception tracking. -Both client and server are -[open source](https://github.com/getsentry/sentry). - -## Usage - -Every sentry application defined on the server gets a different -[DSN](https://www.getsentry.com/docs/). In the example below replace -`YOUR_DSN` with the one created for your application. - -```go -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/sentry" -) - -func main() { - log := logrus.New() - hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - }) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` - -## Special fields - -Some logrus fields have a special meaning in this hook, -these are server_name and logger. -When logs are sent to sentry these fields are treated differently. -- server_name (also known as hostname) is the name of the server which -is logging the event (hostname.example.com) -- logger is the part of the application which is logging the event. -In go this usually means setting it to the name of the package. - -## Timeout - -`Timeout` is the time the sentry hook will wait for a response -from the sentry server. - -If this time elapses with no response from -the server an error will be returned. - -If `Timeout` is set to 0 the SentryHook will not wait for a reply -and will assume a correct delivery. - -The SentryHook has a default timeout of `100 milliseconds` when created -with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field: - -```go -hook, _ := logrus_sentry.NewSentryHook(...) 
-hook.Timeout = 20*time.Second -``` diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go deleted file mode 100644 index 379f281c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go +++ /dev/null @@ -1,100 +0,0 @@ -package logrus_sentry - -import ( - "fmt" - "time" - - "github.com/Sirupsen/logrus" - "github.com/getsentry/raven-go" -) - -var ( - severityMap = map[logrus.Level]raven.Severity{ - logrus.DebugLevel: raven.DEBUG, - logrus.InfoLevel: raven.INFO, - logrus.WarnLevel: raven.WARNING, - logrus.ErrorLevel: raven.ERROR, - logrus.FatalLevel: raven.FATAL, - logrus.PanicLevel: raven.FATAL, - } -) - -func getAndDel(d logrus.Fields, key string) (string, bool) { - var ( - ok bool - v interface{} - val string - ) - if v, ok = d[key]; !ok { - return "", false - } - - if val, ok = v.(string); !ok { - return "", false - } - delete(d, key) - return val, true -} - -// SentryHook delivers logs to a sentry server. -type SentryHook struct { - // Timeout sets the time to wait for a delivery error from the sentry server. - // If this is set to zero the server will not wait for any response and will - // consider the message correctly sent - Timeout time.Duration - - client *raven.Client - levels []logrus.Level -} - -// NewSentryHook creates a hook to be added to an instance of logger -// and initializes the raven client. -// This method sets the timeout to 100 milliseconds. -func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) { - client, err := raven.NewClient(DSN, nil) - if err != nil { - return nil, err - } - return &SentryHook{100 * time.Millisecond, client, levels}, nil -} - -// Called when an event should be sent to sentry -// Special fields that sentry uses to give more information to the server -// are extracted from entry.Data (if they are found) -// These fields are: logger and server_name -func (hook *SentryHook) Fire(entry *logrus.Entry) error { - packet := &raven.Packet{ - Message: entry.Message, - Timestamp: raven.Timestamp(entry.Time), - Level: severityMap[entry.Level], - Platform: "go", - } - - d := entry.Data - - if logger, ok := getAndDel(d, "logger"); ok { - packet.Logger = logger - } - if serverName, ok := getAndDel(d, "server_name"); ok { - packet.ServerName = serverName - } - packet.Extra = map[string]interface{}(d) - - _, errCh := hook.client.Capture(packet, nil) - timeout := hook.Timeout - if timeout != 0 { - timeoutCh := time.After(timeout) - select { - case err := <-errCh: - return err - case <-timeoutCh: - return fmt.Errorf("no response from sentry server in %s", timeout) - } - } - return nil -} - -// Levels returns the available logging levels. 
-func (hook *SentryHook) Levels() []logrus.Level { - return hook.levels -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go deleted file mode 100644 index 45f18d17..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package logrus_sentry - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/getsentry/raven-go" -) - -const ( - message = "error message" - server_name = "testserver.internal" - logger_name = "test.logger" -) - -func getTestLogger() *logrus.Logger { - l := logrus.New() - l.Out = ioutil.Discard - return l -} - -func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) { - pch := make(chan *raven.Packet, 1) - s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - d := json.NewDecoder(req.Body) - p := &raven.Packet{} - err := d.Decode(p) - if err != nil { - t.Fatal(err.Error()) - } - - pch <- p - })) - defer s.Close() - - fragments := strings.SplitN(s.URL, "://", 2) - dsn := fmt.Sprintf( - "%s://public:secret@%s/sentry/project-id", - fragments[0], - fragments[1], - ) - tf(dsn, pch) -} - -func TestSpecialFields(t *testing.T) { - WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { - logger := getTestLogger() - - hook, err := NewSentryHook(dsn, []logrus.Level{ - logrus.ErrorLevel, - }) - - if err != nil { - t.Fatal(err.Error()) - } - logger.Hooks.Add(hook) - logger.WithFields(logrus.Fields{ - "server_name": server_name, - "logger": logger_name, - }).Error(message) - - packet := <-pch - if packet.Logger != logger_name { - t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger) - } - - if packet.ServerName != server_name { - t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName) - } - }) -} - -func TestSentryHandler(t *testing.T) { - WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { - logger := getTestLogger() - hook, err := NewSentryHook(dsn, []logrus.Level{ - logrus.ErrorLevel, - }) - if err != nil { - t.Fatal(err.Error()) - } - logger.Hooks.Add(hook) - - logger.Error(message) - packet := <-pch - if packet.Message != message { - t.Errorf("message should have been %s, was %s", message, packet.Message) - } - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md deleted file mode 100644 index 4dbb8e72..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Syslog Hooks for Logrus :walrus: - -## Usage - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go deleted file mode 100644 index b6fa3746..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go +++ /dev/null @@ -1,59 +0,0 @@ -package logrus_syslog - -import ( - "fmt" - "github.com/Sirupsen/logrus" - "log/syslog" - "os" -) - -// SyslogHook to send logs via syslog. -type SyslogHook struct { - Writer *syslog.Writer - SyslogNetwork string - SyslogRaddr string -} - -// Creates a hook to be added to an instance of logger. This is called with -// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` -// `if err == nil { log.Hooks.Add(hook) }` -func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { - w, err := syslog.Dial(network, raddr, priority, tag) - return &SyslogHook{w, network, raddr}, err -} - -func (hook *SyslogHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) - return err - } - - switch entry.Level { - case logrus.PanicLevel: - return hook.Writer.Crit(line) - case logrus.FatalLevel: - return hook.Writer.Crit(line) - case logrus.ErrorLevel: - return hook.Writer.Err(line) - case logrus.WarnLevel: - return hook.Writer.Warning(line) - case logrus.InfoLevel: - return hook.Writer.Info(line) - case logrus.DebugLevel: - return hook.Writer.Debug(line) - default: - return nil - } -} - -func (hook *SyslogHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go deleted file mode 100644 index 42762dc1..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_syslog - -import ( - "github.com/Sirupsen/logrus" - "log/syslog" - "testing" -) - -func TestLocalhostAddAndPrint(t *testing.T) { - log := logrus.New() - hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err != nil { - t.Errorf("Unable to connect to local syslog.") - } - - log.Hooks.Add(hook) - - for _, level := range hook.Levels() { - if len(log.Hooks[level]) != 1 { - t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) - } - } - - log.Info("Congratulations!") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index dcc4f1d9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,40 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(f.TimestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d708732..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index da928a37..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,203 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks levelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(levelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. 
-func New() *Logger { - return &Logger{ - Out: os.Stdout, - Formatter: new(TextFormatter), - Hooks: make(levelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) 
- } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index d85dba4d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func 
TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1) - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", 
WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go deleted file mode 100644 index 8fe02a4a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go deleted file mode 100644 index 0428ee5d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -*/ -package logrus - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b8bebc13..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go deleted file mode 100644 index af609a53..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 612417ff..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,149 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColored := (f.ForceColors || isTerminal) && !f.DisableColors - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return 
false - } - } - return true -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { - switch value.(type) { - case string: - if needsQuoting(value.(string)) { - fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - case error: - if needsQuoting(value.(error).Error()) { - fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - default: - fmt.Fprintf(b, "%v=%v ", key, value) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index e25a44f6..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "testing" - "time" -) - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") - checkQuoting(true, "x y") - checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) -} - -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. 
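
[Editor's note] One trap for anyone reading the removed text formatter above: despite its name, `needsQuoting` returns `true` when the value consists only of safe characters, i.e. when quoting is *not* needed, and `appendKeyValue` compensates by swapping the `%s`/`%q` verbs. The net behavior is what `TestQuoting` asserts. A minimal standalone sketch of the effective rule, with the inversion removed for clarity (illustrative only, not part of the patch):

```go
package main

import "fmt"

// isBareword reports whether a value can be printed without quotes.
// It mirrors the removed needsQuoting helper, whose boolean sense is
// inverted relative to its name.
func isBareword(text string) bool {
	for _, ch := range text {
		if !((ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z') ||
			(ch >= '0' && ch <= '9') ||
			ch == '-' || ch == '.') {
			return false
		}
	}
	return true
}

func main() {
	// Matches the expectations in TestQuoting: "v1.0" stays bare,
	// "/foobar" and "x y" get quoted.
	for _, v := range []string{"v1.0", "/foobar", "x y"} {
		if isBareword(v) {
			fmt.Printf("test=%s\n", v)
		} else {
			fmt.Printf("test=%q\n", v)
		}
	}
}
```
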
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c7..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/.travis.yml deleted file mode 100644 index a77773b8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - tip - -install: - - go get github.com/bugsnag/panicwrap - - go get github.com/bugsnag/osext - - go get github.com/bitly/go-simplejson - - go get github.com/revel/revel diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/LICENSE.txt b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/LICENSE.txt deleted file mode 100644 index 3cb0ec0f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Bugsnag - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
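
[Editor's note] The `Writer()` helper removed just above is easy to misread: it hands back an `*io.PipeWriter`, spawns a goroutine that scans the pipe line by line and logs each line through `logger.Print`, and relies on a finalizer to close the pipe. A short usage sketch under that API (hypothetical wiring, not part of the patch); the explicit `Close` lets the scanner goroutine exit deterministically instead of waiting for garbage collection:

```go
package main

import (
	"log"

	"github.com/Sirupsen/logrus" // import path as vendored in this tree
)

func main() {
	logger := logrus.New()

	// Every line written to w is emitted as a logrus entry via logger.Print.
	w := logger.Writer()
	defer w.Close() // unblocks and ends the scanner goroutine

	// Route the standard library logger through logrus.
	stdlog := log.New(w, "", 0)
	stdlog.Println("hello from the stdlib logger")
}
```
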
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/README.md deleted file mode 100644 index b5432293..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/README.md +++ /dev/null @@ -1,489 +0,0 @@ -Bugsnag Notifier for Golang -=========================== - -The Bugsnag Notifier for Golang gives you instant notification of panics, or -unexpected errors, in your golang app. Any unhandled panics will trigger a -notification to be sent to your Bugsnag project. - -[Bugsnag](http://bugsnag.com) captures errors in real-time from your web, -mobile and desktop applications, helping you to understand and resolve them -as fast as possible. [Create a free account](http://bugsnag.com) to start -capturing exceptions from your applications. - -## How to Install - -1. Download the code - - ```shell - go get github.com/bugsnag/bugsnag-go - ``` - -### Using with net/http apps - -For a golang app based on [net/http](https://godoc.org/net/http), integrating -Bugsnag takes two steps. You should also use these instructions if you're using -the [gorilla toolkit](http://www.gorillatoolkit.org/), or the -[pat](https://github.com/bmizerany/pat/) muxer. - -1. Configure bugsnag at the start of your `main()` function: - - ```go - import "github.com/bugsnag/bugsnag-go" - - func main() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - ReleaseStage: "production", - // more configuration options - }) - - // rest of your program. - } - ``` - -2. Wrap your server in a [bugsnag.Handler](https://godoc.org/github.com/bugsnag/bugsnag-go/#Handler) - - ```go - // a. If you're using the builtin http mux, you can just pass - // bugsnag.Handler(nil) to http.ListenAndServer - http.ListenAndServe(":8080", bugsnag.Handler(nil)) - - // b. If you're creating a server manually yourself, you can set - // its handlers the same way - srv := http.Server{ - Handler: bugsnag.Handler(nil) - } - - // c. If you're not using the builtin http mux, wrap your own handler - // (though make sure that it doesn't already catch panics) - http.ListenAndServe(":8080", bugsnag.Handler(handler)) - ``` - -### Using with Revel apps - -There are two steps to get panic handling in [revel](https://revel.github.io) apps. - -1. Add the `bugsnagrevel.Filter` immediately after the `revel.PanicFilter` in `app/init.go`: - - ```go - - import "github.com/bugsnag/bugsnag-go/revel" - - revel.Filters = []revel.Filter{ - revel.PanicFilter, - bugsnagrevel.Filter, - // ... - } - ``` - -2. Set bugsnag.apikey in the top section of `conf/app.conf`. - - ``` - module.static=github.com/revel/revel/modules/static - - bugsnag.apikey=YOUR_API_KEY_HERE - - [dev] - ``` - -### Using with Google App Engine - -1. Configure bugsnag at the start of your `init()` function: - - ```go - import "github.com/bugsnag/bugsnag-go" - - func init() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - }) - - // ... - } - ``` - -2. Wrap *every* http.Handler or http.HandlerFunc with Bugsnag: - - ```go - // a. If you're using HandlerFuncs - http.HandleFunc("/", bugsnag.HandlerFunc( - func (w http.ResponseWriter, r *http.Request) { - // ... - })) - - // b. If you're using Handlers - http.Handle("/", bugsnag.Handler(myHttpHandler)) - ``` - -3. 
In order to use Bugsnag, you must provide the current -[`appengine.Context`](https://developers.google.com/appengine/docs/go/reference#Context), or -current `*http.Request` as rawData. The easiest way to do this is to create a new notifier. - - ```go - c := appengine.NewContext(r) - notifier := bugsnag.New(c) - - if err != nil { - notifier.Notify(err) - } - - go func () { - defer notifier.Recover() - - // ... - }() - ``` - - -## Notifying Bugsnag manually - -Bugsnag will automatically handle any panics that crash your program and notify -you of them. If you've integrated with `revel` or `net/http`, then you'll also -be notified of any panics() that happen while processing a request. - -Sometimes however it's useful to manually notify Bugsnag of a problem. To do this, -call [`bugsnag.Notify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Notify) - -```go -if err != nil { - bugsnag.Notify(err) -} -``` - -### Manual panic handling - -To avoid a panic in a goroutine from crashing your entire app, you can use -[`bugsnag.Recover()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) -to stop a panic from unwinding the stack any further. When `Recover()` is hit, -it will send any current panic to Bugsnag and then stop panicking. This is -most useful at the start of a goroutine: - -```go -go func() { - defer bugsnag.Recover() - - // ... -}() -``` - -Alternatively you can use -[`bugsnag.AutoNotify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) -to notify bugsnag of a panic while letting the program continue to panic. This -is useful if you're using a Framework that already has some handling of panics -and you are retrofitting bugsnag support. - -```go -defer bugsnag.AutoNotify() -``` - -## Sending Custom Data - -Most functions in the Bugsnag API, including `bugsnag.Notify()`, -`bugsnag.Recover()`, `bugsnag.AutoNotify()`, and `bugsnag.Handler()` let you -attach data to the notifications that they send. To do this you pass in rawData, -which can be any of the supported types listed here. To add support for more -types of rawData see [OnBeforeNotify](#custom-data-with-onbeforenotify). - -### Custom MetaData - -Custom metaData appears as tabs on Bugsnag.com. You can set it by passing -a [`bugsnag.MetaData`](https://godoc.org/github.com/bugsnag/bugsnag-go/#MetaData) -object as rawData. - -```go -bugsnag.Notify(err, - bugsnag.MetaData{ - "Account": { - "Name": Account.Name, - "Paying": Account.Plan.Premium, - }, - }) -``` - -### Request data - -Bugsnag can extract interesting data from -[`*http.Request`](https://godoc.org/net/http/#Request) objects, and -[`*revel.Controller`](https://godoc.org/github.com/revel/revel/#Controller) -objects. These are automatically passed in when handling panics, and you can -pass them yourself. - -```go -func (w http.ResponseWriter, r *http.Request) { - bugsnag.Notify(err, r) -} -``` - -### User data - -User data is searchable, and the `Id` powers the count of users affected. You -can set which user an error affects by passing a -[`bugsnag.User`](https://godoc.org/github.com/bugsnag/bugsnag-go/#User) object as -rawData. - -```go -bugsnag.Notify(err, - bugsnag.User{Id: "1234", Name: "Conrad", Email: "me@cirw.in"}) -``` - -### Context - -The context shows up prominently in the list view so that you can get an idea -of where a problem occurred. You can set it by passing a -[`bugsnag.Context`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Context) -object as rawData. 
- -```go -bugsnag.Notify(err, bugsnag.Context{"backgroundJob"}) -``` - -### Severity - -Bugsnag supports three severities, `SeverityError`, `SeverityWarning`, and `SeverityInfo`. -You can set the severity of an error by passing one of these objects as rawData. - -```go -bugsnag.Notify(err, bugsnag.SeverityInfo) -``` - -## Configuration - -You must call `bugsnag.Configure()` at the start of your program to use Bugsnag, you pass it -a [`bugsnag.Configuration`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Configuration) object -containing any of the following values. - -### APIKey - -The Bugsnag API key can be found on your [Bugsnag dashboard](https://bugsnag.com) under "Settings". - -```go -bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", -}) -``` - -### Endpoint - -The Bugsnag endpoint defaults to `https://notify.bugsnag.com/`. If you're using Bugsnag enterprise, -you should set this to the endpoint of your local instance. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Endpoint: "http://bugsnag.internal:49000/", -}) -``` - -### ReleaseStage - -The ReleaseStage tracks where your app is deployed. You should set this to `production`, `staging`, -`development` or similar as appropriate. - -```go -bugsnag.Configure(bugsnag.Configuration{ - ReleaseStage: "development", -}) -``` - -### NotifyReleaseStages - -The list of ReleaseStages to notify in. By default Bugsnag will notify you in all release stages, but -you can use this to silence development errors. - -```go -bugsnag.Configure(bugsnag.Configuration{ - NotifyReleaseStages: []string{"production", "staging"}, -}) -``` - -### AppVersion - -If you use a versioning scheme for deploys of your app, Bugsnag can use the `AppVersion` to only -re-open errors if they occur in later version of the app. - -```go -bugsnag.Configure(bugsnag.Configuration{ - AppVersion: "1.2.3", -}) -``` - -### Hostname - -The hostname is used to track where exceptions are coming from in the Bugsnag dashboard. The -default value is obtained from `os.Hostname()` so you won't often need to change this. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Hostname: "go1", -}) -``` - -### ProjectPackages - -In order to determine where a crash happens Bugsnag needs to know which packages you consider to -be part of your app (as opposed to a library). By default this is set to `[]string{"main*"}`. Strings -are matched to package names using [`filepath.Match`](http://godoc.org/path/filepath#Match). - -```go -bugsnag.Configure(bugsnag.Configuration{ - ProjectPackages: []string{"main", "github.com/domain/myapp/*"}, -} -``` - -### ParamsFilters - -Sometimes sensitive data is accidentally included in Bugsnag MetaData. You can remove it by -setting `ParamsFilters`. Any key in the `MetaData` that includes any string in the filters -will be redacted. The default is `[]string{"password", "secret"}`, which prevents fields like -`password`, `password_confirmation` and `secret_answer` from being sent. - -```go -bugsnag.Configure(bugsnag.Configuration{ - ParamsFilters: []string{"password", "secret"}, -} -``` - -### Logger - -The Logger to write to in case of an error inside Bugsnag. This defaults to the global logger. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Logger: app.Logger, -} -``` - -### PanicHandler - -The first time Bugsnag is configured, it wraps the running program in a panic -handler using [panicwrap](http://godoc.org/github.com/ConradIrwin/panicwrap). This -forks a sub-process which monitors unhandled panics. 
To prevent this, set -`PanicHandler` to `func() {}` the first time you call -`bugsnag.Configure`. This will prevent bugsnag from being able to notify you about -unhandled panics. - -```go -bugsnag.Configure(bugsnag.Configuration{ - PanicHandler: func() {}, -}) -``` - -### Synchronous - -Bugsnag usually starts a new goroutine before sending notifications. This means -that notifications can be lost if you do a bugsnag.Notify and then immediately -os.Exit. To avoid this problem, set Bugsnag to Synchronous (or just `panic()` -instead ;). - -```go -bugsnag.Configure(bugsnag.Configuration{ - Synchronous: true -}) -``` - -Or just for one error: - -```go -bugsnag.Notify(err, bugsnag.Configuration{Synchronous: true}) -``` - -### Transport - -The transport configures how Bugsnag makes http requests. By default we use -[`http.DefaultTransport`](http://godoc.org/net/http#RoundTripper) which handles -HTTP proxies automatically using the `$HTTP_PROXY` environment variable. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Transport: http.DefaultTransport, -}) -``` - -## Custom data with OnBeforeNotify - -While it's nice that you can pass `MetaData` directly into `bugsnag.Notify`, -`bugsnag.AutoNotify`, and `bugsnag.Recover`, this can be a bit cumbersome and -inefficient — you're constructing the meta-data whether or not it will actually -be used. A better idea is to pass raw data in to these functions, and add an -`OnBeforeNotify` filter that converts them into `MetaData`. - -For example, lets say our system processes jobs: - -```go -type Job struct{ - Retry bool - UserId string - UserEmail string - Name string - Params map[string]string -} -``` - -You can pass a job directly into Bugsnag.notify: - -```go -bugsnag.Notify(err, job) -``` - -And then add a filter to extract information from that job and attach it to the -Bugsnag event: - -```go -bugsnag.OnBeforeNotify( - func(event *bugsnag.Event, config *bugsnag.Configuration) error { - - // Search all the RawData for any *Job pointers that we're passed in - // to bugsnag.Notify() and friends. - for _, datum := range event.RawData { - if job, ok := datum.(*Job); ok { - // don't notify bugsnag about errors in retries - if job.Retry { - return fmt.Errorf("not notifying about retried jobs") - } - - // add the job as a tab on Bugsnag.com - event.MetaData.AddStruct("Job", job) - - // set the user correctly - event.User = &User{Id: job.UserId, Email: job.UserEmail} - } - } - - // continue notifying as normal - return nil - }) -``` - -## Advanced Usage - -If you want to have multiple different configurations around in one program, -you can use `bugsnag.New()` to create multiple independent instances of -Bugsnag. You can use these without calling `bugsnag.Configure()`, but bear in -mind that until you call `bugsnag.Configure()` unhandled panics will not be -sent to bugsnag. - -```go -notifier := bugsnag.New(bugsnag.Configuration{ - APIKey: "YOUR_OTHER_API_KEY", -}) -``` - -In fact any place that lets you pass in `rawData` also allows you to pass in -configuration. 
For example to send http errors to one bugsnag project, you -could do: - -```go -bugsnag.Handler(nil, bugsnag.Configuration{APIKey: "YOUR_OTHER_API_KEY"}) -``` - -### GroupingHash - -If you need to override Bugsnag's grouping algorithm, you can set the -`GroupingHash` in an `OnBeforeNotify`: - -```go -bugsnag.OnBeforeNotify( - func (event *bugsnag.Event, config *bugsnag.Configuration) error { - event.GroupingHash = calculateGroupingHash(event) - return nil - }) -``` diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/appengine.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/appengine.go deleted file mode 100644 index 73aa2d77..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/appengine.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build appengine - -package bugsnag - -import ( - "appengine" - "appengine/urlfetch" - "appengine/user" - "fmt" - "log" - "net/http" -) - -func defaultPanicHandler() {} - -func init() { - OnBeforeNotify(appengineMiddleware) -} - -func appengineMiddleware(event *Event, config *Configuration) (err error) { - var c appengine.Context - - for _, datum := range event.RawData { - if r, ok := datum.(*http.Request); ok { - c = appengine.NewContext(r) - break - } else if context, ok := datum.(appengine.Context); ok { - c = context - break - } - } - - if c == nil { - return fmt.Errorf("No appengine context given") - } - - // You can only use the builtin http library if you pay for appengine, - // so we use the appengine urlfetch service instead. - config.Transport = &urlfetch.Transport{ - Context: c, - } - - // Anything written to stderr/stdout is discarded, so lets log to the request. - config.Logger = log.New(appengineWriter{c}, config.Logger.Prefix(), config.Logger.Flags()) - - // Set the releaseStage appropriately - if config.ReleaseStage == "" { - if appengine.IsDevAppServer() { - config.ReleaseStage = "development" - } else { - config.ReleaseStage = "production" - } - } - - if event.User == nil { - u := user.Current(c) - if u != nil { - event.User = &User{ - Id: u.ID, - Email: u.Email, - } - } - } - - return nil -} - -// Convert an appengine.Context into an io.Writer so we can create a log.Logger. -type appengineWriter struct { - appengine.Context -} - -func (c appengineWriter) Write(b []byte) (int, error) { - c.Warningf(string(b)) - return len(b), nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag.go deleted file mode 100644 index acd0fed3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag.go +++ /dev/null @@ -1,131 +0,0 @@ -package bugsnag - -import ( - "github.com/bugsnag/bugsnag-go/errors" - "log" - "net/http" - "os" - "sync" - - // Fixes a bug with SHA-384 intermediate certs on some platforms. - // - https://github.com/bugsnag/bugsnag-go/issues/9 - _ "crypto/sha512" -) - -// The current version of bugsnag-go. -const VERSION = "1.0.2" - -var once sync.Once -var middleware middlewareStack - -// The configuration for the default bugsnag notifier. -var Config Configuration - -var defaultNotifier = Notifier{&Config, nil} - -// Configure Bugsnag. The only required setting is the APIKey, which can be -// obtained by clicking on "Settings" in your Bugsnag dashboard. 
This function -// is also responsible for installing the global panic handler, so it should be -// called as early as possible in your initialization process. -func Configure(config Configuration) { - Config.update(&config) - once.Do(Config.PanicHandler) -} - -// Notify sends an error to Bugsnag along with the current stack trace. The -// rawData is used to send extra information along with the error. For example -// you can pass the current http.Request to Bugsnag to see information about it -// in the dashboard, or set the severity of the notification. -func Notify(err error, rawData ...interface{}) error { - return defaultNotifier.Notify(errors.New(err, 1), rawData...) -} - -// AutoNotify logs a panic on a goroutine and then repanics. -// It should only be used in places that have existing panic handlers further -// up the stack. See bugsnag.Recover(). The rawData is used to send extra -// information along with any panics that are handled this way. -// Usage: defer bugsnag.AutoNotify() -func AutoNotify(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityError) - defaultNotifier.Notify(errors.New(err, 2), rawData...) - panic(err) - } -} - -// Recover logs a panic on a goroutine and then recovers. -// The rawData is used to send extra information along with -// any panics that are handled this way -// Usage: defer bugsnag.Recover() -func Recover(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityWarning) - defaultNotifier.Notify(errors.New(err, 2), rawData...) - } -} - -// OnBeforeNotify adds a callback to be run before a notification is sent to -// Bugsnag. It can be used to modify the event or its MetaData. Changes made -// to the configuration are local to notifying about this event. To prevent the -// event from being sent to Bugsnag return an error, this error will be -// returned from bugsnag.Notify() and the event will not be sent. -func OnBeforeNotify(callback func(event *Event, config *Configuration) error) { - middleware.OnBeforeNotify(callback) -} - -// Handler creates an http Handler that notifies Bugsnag any panics that -// happen. It then repanics so that the default http Server panic handler can -// handle the panic too. The rawData is used to send extra information along -// with any panics that are handled this way. -func Handler(h http.Handler, rawData ...interface{}) http.Handler { - notifier := New(rawData...) - if h == nil { - h = http.DefaultServeMux - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer notifier.AutoNotify(r) - h.ServeHTTP(w, r) - }) -} - -// HandlerFunc creates an http HandlerFunc that notifies Bugsnag about any -// panics that happen. It then repanics so that the default http Server panic -// handler can handle the panic too. The rawData is used to send extra -// information along with any panics that are handled this way. If you have -// already wrapped your http server using bugsnag.Handler() you don't also need -// to wrap each HandlerFunc. -func HandlerFunc(h http.HandlerFunc, rawData ...interface{}) http.HandlerFunc { - notifier := New(rawData...) 
- - return func(w http.ResponseWriter, r *http.Request) { - defer notifier.AutoNotify(r) - h(w, r) - } -} - -func init() { - // Set up builtin middlewarez - OnBeforeNotify(httpRequestMiddleware) - - // Default configuration - Config.update(&Configuration{ - APIKey: "", - Endpoint: "https://notify.bugsnag.com/", - Hostname: "", - AppVersion: "", - ReleaseStage: "", - ParamsFilters: []string{"password", "secret"}, - // * for app-engine - ProjectPackages: []string{"main*"}, - NotifyReleaseStages: nil, - Logger: log.New(os.Stdout, log.Prefix(), log.Flags()), - PanicHandler: defaultPanicHandler, - Transport: http.DefaultTransport, - }) - - hostname, err := os.Hostname() - if err == nil { - Config.Hostname = hostname - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag_test.go deleted file mode 100644 index 9f6a52ca..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag_test.go +++ /dev/null @@ -1,461 +0,0 @@ -package bugsnag - -import ( - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "strings" - "sync" - "testing" - "time" - - "github.com/bitly/go-simplejson" -) - -func TestConfigure(t *testing.T) { - Configure(Configuration{ - APIKey: testAPIKey, - }) - - if Config.APIKey != testAPIKey { - t.Errorf("Setting APIKey didn't work") - } - - if New().Config.APIKey != testAPIKey { - t.Errorf("Setting APIKey didn't work for new notifiers") - } -} - -var postedJSON = make(chan []byte, 10) -var testOnce sync.Once -var testEndpoint string -var testAPIKey = "166f5ad3590596f9aa8d601ea89af845" - -func startTestServer() { - testOnce.Do(func() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) - if err != nil { - panic(err) - } - postedJSON <- body - }) - - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(err) - } - testEndpoint = "http://" + l.Addr().String() + "/" - - go http.Serve(l, mux) - }) -} - -type _recurse struct { - *_recurse -} - -func TestNotify(t *testing.T) { - startTestServer() - - recurse := _recurse{} - recurse._recurse = &recurse - - OnBeforeNotify(func(event *Event, config *Configuration) error { - if event.Context == "testing" { - event.GroupingHash = "lol" - } - return nil - }) - - Notify(fmt.Errorf("hello world"), - Configuration{ - APIKey: testAPIKey, - Endpoint: testEndpoint, - ReleaseStage: "test", - AppVersion: "1.2.3", - Hostname: "web1", - ProjectPackages: []string{"github.com/bugsnag/bugsnag-go"}, - }, - User{Id: "123", Name: "Conrad", Email: "me@cirw.in"}, - Context{"testing"}, - MetaData{"test": { - "password": "sneaky", - "value": "able", - "broken": complex(1, 2), - "recurse": recurse, - }}, - ) - - json, err := simplejson.NewJson(<-postedJSON) - - if err != nil { - t.Fatal(err) - } - - if json.Get("apiKey").MustString() != testAPIKey { - t.Errorf("Wrong api key in payload") - } - - if json.GetPath("notifier", "name").MustString() != "Bugsnag Go" { - t.Errorf("Wrong notifier name in payload") - } - - event := json.Get("events").GetIndex(0) - - for k, value := range map[string]string{ - "payloadVersion": "2", - "severity": "warning", - "context": "testing", - "groupingHash": "lol", - "app.releaseStage": "test", - "app.version": "1.2.3", - "device.hostname": "web1", - "user.id": "123", - "user.name": "Conrad", - "user.email": "me@cirw.in", - 
"metaData.test.password": "[REDACTED]", - "metaData.test.value": "able", - "metaData.test.broken": "[complex128]", - "metaData.test.recurse._recurse": "[RECURSION]", - } { - key := strings.Split(k, ".") - if event.GetPath(key...).MustString() != value { - t.Errorf("Wrong %v: %v != %v", key, event.GetPath(key...).MustString(), value) - } - } - - exception := event.Get("exceptions").GetIndex(0) - - if exception.Get("message").MustString() != "hello world" { - t.Errorf("Wrong message in payload") - } - - if exception.Get("errorClass").MustString() != "*errors.errorString" { - t.Errorf("Wrong errorClass in payload: %v", exception.Get("errorClass").MustString()) - } - - frame0 := exception.Get("stacktrace").GetIndex(0) - if frame0.Get("file").MustString() != "bugsnag_test.go" || - frame0.Get("method").MustString() != "TestNotify" || - frame0.Get("inProject").MustBool() != true || - frame0.Get("lineNumber").MustInt() == 0 { - t.Errorf("Wrong frame0") - } - - frame1 := exception.Get("stacktrace").GetIndex(1) - - if frame1.Get("file").MustString() != "testing/testing.go" || - frame1.Get("method").MustString() != "tRunner" || - frame1.Get("inProject").MustBool() != false || - frame1.Get("lineNumber").MustInt() == 0 { - t.Errorf("Wrong frame1") - } -} - -func crashyHandler(w http.ResponseWriter, r *http.Request) { - c := make(chan int) - close(c) - c <- 1 -} - -func runCrashyServer(rawData ...interface{}) (net.Listener, error) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return nil, err - } - - mux := http.NewServeMux() - mux.HandleFunc("/", crashyHandler) - srv := http.Server{ - Addr: l.Addr().String(), - Handler: Handler(mux, rawData...), - ErrorLog: log.New(ioutil.Discard, log.Prefix(), 0), - } - - go srv.Serve(l) - return l, err -} - -func TestHandler(t *testing.T) { - startTestServer() - - l, err := runCrashyServer(Configuration{ - APIKey: testAPIKey, - Endpoint: testEndpoint, - ProjectPackages: []string{"github.com/bugsnag/bugsnag-go"}, - Logger: log.New(ioutil.Discard, log.Prefix(), log.Flags()), - }, SeverityInfo) - if err != nil { - t.Fatal(err) - } - http.Get("http://" + l.Addr().String() + "/ok?foo=bar") - l.Close() - - json, err := simplejson.NewJson(<-postedJSON) - if err != nil { - t.Fatal(err) - } - - if json.Get("apiKey").MustString() != testAPIKey { - t.Errorf("Wrong api key in payload") - } - - if json.GetPath("notifier", "name").MustString() != "Bugsnag Go" { - t.Errorf("Wrong notifier name in payload") - } - - event := json.Get("events").GetIndex(0) - - for k, value := range map[string]string{ - "payloadVersion": "2", - "severity": "info", - "user.id": "127.0.0.1", - "metaData.Request.Url": "http://" + l.Addr().String() + "/ok?foo=bar", - "metaData.Request.Method": "GET", - } { - key := strings.Split(k, ".") - if event.GetPath(key...).MustString() != value { - t.Errorf("Wrong %v: %v != %v", key, event.GetPath(key...).MustString(), value) - } - } - - if event.GetPath("metaData", "Request", "Params", "foo").GetIndex(0).MustString() != "bar" { - t.Errorf("missing GET params in request metadata") - } - - if event.GetPath("metaData", "Headers", "Accept-Encoding").GetIndex(0).MustString() != "gzip" { - t.Errorf("missing GET params in request metadata: %v", event.GetPath("metaData", "Headers")) - } - - exception := event.Get("exceptions").GetIndex(0) - - if exception.Get("message").MustString() != "runtime error: send on closed channel" { - t.Errorf("Wrong message in payload: %v", exception.Get("message").MustString()) - } - - if 
exception.Get("errorClass").MustString() != "runtime.errorCString" { - t.Errorf("Wrong errorClass in payload: %v", exception.Get("errorClass").MustString()) - } - - // TODO:CI these are probably dependent on go version. - frame0 := exception.Get("stacktrace").GetIndex(0) - if frame0.Get("file").MustString() != "runtime/panic.c" || - frame0.Get("method").MustString() != "panicstring" || - frame0.Get("inProject").MustBool() != false || - frame0.Get("lineNumber").MustInt() == 0 { - t.Errorf("Wrong frame0: %v", frame0) - } - - frame3 := exception.Get("stacktrace").GetIndex(3) - - if frame3.Get("file").MustString() != "bugsnag_test.go" || - frame3.Get("method").MustString() != "crashyHandler" || - frame3.Get("inProject").MustBool() != true || - frame3.Get("lineNumber").MustInt() == 0 { - t.Errorf("Wrong frame3: %v", frame3) - } -} - -func TestAutoNotify(t *testing.T) { - - var panicked interface{} - - func() { - defer func() { - panicked = recover() - }() - defer AutoNotify(Configuration{Endpoint: testEndpoint, APIKey: testAPIKey}) - - panic("eggs") - }() - - if panicked.(string) != "eggs" { - t.Errorf("didn't re-panic") - } - - json, err := simplejson.NewJson(<-postedJSON) - if err != nil { - t.Fatal(err) - } - - event := json.Get("events").GetIndex(0) - - if event.Get("severity").MustString() != "error" { - t.Errorf("severity should be error") - } - exception := event.Get("exceptions").GetIndex(0) - - if exception.Get("message").MustString() != "eggs" { - t.Errorf("caught wrong panic") - } -} - -func TestRecover(t *testing.T) { - var panicked interface{} - - func() { - defer func() { - panicked = recover() - }() - defer Recover(Configuration{Endpoint: testEndpoint, APIKey: testAPIKey}) - - panic("ham") - }() - - if panicked != nil { - t.Errorf("re-panick'd") - } - - json, err := simplejson.NewJson(<-postedJSON) - if err != nil { - t.Fatal(err) - } - - event := json.Get("events").GetIndex(0) - - if event.Get("severity").MustString() != "warning" { - t.Errorf("severity should be warning") - } - exception := event.Get("exceptions").GetIndex(0) - - if exception.Get("message").MustString() != "ham" { - t.Errorf("caught wrong panic") - } -} - -func handleGet(w http.ResponseWriter, r *http.Request) { - -} - -var createAccount = handleGet - -type _job struct { - Name string - Process func() -} - -func ExampleAutoNotify() interface{} { - return func(w http.ResponseWriter, request *http.Request) { - defer AutoNotify(request, Context{"createAccount"}) - - createAccount(w, request) - } -} - -func ExampleRecover(job _job) { - go func() { - defer Recover(Context{job.Name}, SeverityWarning) - - job.Process() - }() -} - -func ExampleConfigure() { - Configure(Configuration{ - APIKey: "YOUR_API_KEY_HERE", - - ReleaseStage: "production", - - // See Configuration{} for other fields - }) -} - -func ExampleHandler() { - // Set up your http handlers as usual - http.HandleFunc("/", handleGet) - - // use bugsnag.Handler(nil) to wrap the default http handlers - // so that Bugsnag is automatically notified about panics. - http.ListenAndServe(":1234", Handler(nil)) -} - -func ExampleHandler_customServer() { - // If you're using a custom server, set the handlers explicitly. - http.HandleFunc("/", handleGet) - - srv := http.Server{ - Addr: ":1234", - ReadTimeout: 10 * time.Second, - // use bugsnag.Handler(nil) to wrap the default http handlers - // so that Bugsnag is automatically notified about panics. 
- Handler: Handler(nil), - } - srv.ListenAndServe() -} - -func ExampleHandler_customHandlers() { - // If you're using custom handlers, wrap the handlers explicitly. - handler := http.NewServeMux() - http.HandleFunc("/", handleGet) - // use bugsnag.Handler(handler) to wrap the handlers so that Bugsnag is - // automatically notified about panics - http.ListenAndServe(":1234", Handler(handler)) -} - -func ExampleNotify() { - _, err := net.Listen("tcp", ":80") - - if err != nil { - Notify(err) - } -} - -func ExampleNotify_details(userID string) { - _, err := net.Listen("tcp", ":80") - - if err != nil { - Notify(err, - // show as low-severity - SeverityInfo, - // set the context - Context{"createlistener"}, - // pass the user id in to count users affected. - User{Id: userID}, - // custom meta-data tab - MetaData{ - "Listen": { - "Protocol": "tcp", - "Port": "80", - }, - }, - ) - } - -} - -type Job struct { - Retry bool - UserId string - UserEmail string - Name string - Params map[string]string -} - -func ExampleOnBeforeNotify() { - OnBeforeNotify(func(event *Event, config *Configuration) error { - - // Search all the RawData for any *Job pointers that we're passed in - // to bugsnag.Notify() and friends. - for _, datum := range event.RawData { - if job, ok := datum.(*Job); ok { - // don't notify bugsnag about errors in retries - if job.Retry { - return fmt.Errorf("bugsnag middleware: not notifying about job retry") - } - - // add the job as a tab on Bugsnag.com - event.MetaData.AddStruct("Job", job) - - // set the user correctly - event.User = &User{Id: job.UserId, Email: job.UserEmail} - } - } - - // continue notifying as normal - return nil - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration.go deleted file mode 100644 index 7ff26e56..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration.go +++ /dev/null @@ -1,159 +0,0 @@ -package bugsnag - -import ( - "log" - "net/http" - "path/filepath" - "strings" -) - -// Configuration sets up and customizes communication with the Bugsnag API. -type Configuration struct { - // Your Bugsnag API key, e.g. "c9d60ae4c7e70c4b6c4ebd3e8056d2b8". You can - // find this by clicking Settings on https://bugsnag.com/. - APIKey string - // The Endpoint to notify about crashes. This defaults to - // "https://notify.bugsnag.com/", if you're using Bugsnag Enterprise then - // set it to your internal Bugsnag endpoint. - Endpoint string - - // The current release stage. This defaults to "production" and is used to - // filter errors in the Bugsnag dashboard. - ReleaseStage string - // The currently running version of the app. This is used to filter errors - // in the Bugsnag dasboard. If you set this then Bugsnag will only re-open - // resolved errors if they happen in different app versions. - AppVersion string - // The hostname of the current server. This defaults to the return value of - // os.Hostname() and is graphed in the Bugsnag dashboard. - Hostname string - - // The Release stages to notify in. If you set this then bugsnag-go will - // only send notifications to Bugsnag if the ReleaseStage is listed here. - NotifyReleaseStages []string - - // packages that are part of your app. Bugsnag uses this to determine how - // to group errors and how to display them on your dashboard. 
You should - // include any packages that are part of your app, and exclude libraries - // and helpers. You can list wildcards here, and they'll be expanded using - // filepath.Glob. The default value is []string{"main*"} - ProjectPackages []string - - // Any meta-data that matches these filters will be marked as [REDACTED] - // before sending a Notification to Bugsnag. It defaults to - // []string{"password", "secret"} so that request parameters like password, - // password_confirmation and auth_secret will not be sent to Bugsnag. - ParamsFilters []string - - // The PanicHandler is used by Bugsnag to catch unhandled panics in your - // application. The default panicHandler uses mitchellh's panicwrap library, - // and you can disable this feature by passing an empty: func() {} - PanicHandler func() - - // The logger that Bugsnag should log to. Uses the same defaults as go's - // builtin logging package. bugsnag-go logs whenever it notifies Bugsnag - // of an error, and when any error occurs inside the library itself. - Logger *log.Logger - // The http Transport to use, defaults to the default http Transport. This - // can be configured if you are in an environment like Google App Engine - // that has stringent conditions on making http requests. - Transport http.RoundTripper - // Whether bugsnag should notify synchronously. This defaults to false which - // causes bugsnag-go to spawn a new goroutine for each notification. - Synchronous bool - // TODO: remember to update the update() function when modifying this struct -} - -func (config *Configuration) update(other *Configuration) *Configuration { - if other.APIKey != "" { - config.APIKey = other.APIKey - } - if other.Endpoint != "" { - config.Endpoint = other.Endpoint - } - if other.Hostname != "" { - config.Hostname = other.Hostname - } - if other.AppVersion != "" { - config.AppVersion = other.AppVersion - } - if other.ReleaseStage != "" { - config.ReleaseStage = other.ReleaseStage - } - if other.ParamsFilters != nil { - config.ParamsFilters = other.ParamsFilters - } - if other.ProjectPackages != nil { - config.ProjectPackages = other.ProjectPackages - } - if other.Logger != nil { - config.Logger = other.Logger - } - if other.NotifyReleaseStages != nil { - config.NotifyReleaseStages = other.NotifyReleaseStages - } - if other.PanicHandler != nil { - config.PanicHandler = other.PanicHandler - } - if other.Transport != nil { - config.Transport = other.Transport - } - if other.Synchronous { - config.Synchronous = true - } - - return config -} - -func (config *Configuration) merge(other *Configuration) *Configuration { - return config.clone().update(other) -} - -func (config *Configuration) clone() *Configuration { - clone := *config - return &clone -} - -func (config *Configuration) isProjectPackage(pkg string) bool { - for _, p := range config.ProjectPackages { - if match, _ := filepath.Match(p, pkg); match { - return true - } - } - return false -} - -func (config *Configuration) stripProjectPackages(file string) string { - for _, p := range config.ProjectPackages { - if len(p) > 2 && p[len(p)-2] == '/' && p[len(p)-1] == '*' { - p = p[:len(p)-1] - } else { - p = p + "/" - } - if strings.HasPrefix(file, p) { - return strings.TrimPrefix(file, p) - } - } - - return file -} - -func (config *Configuration) log(fmt string, args ...interface{}) { - if config != nil && config.Logger != nil { - config.Logger.Printf(fmt, args...) - } else { - log.Printf(fmt, args...) 
- } -} - -func (config *Configuration) notifyInReleaseStage() bool { - if config.NotifyReleaseStages == nil { - return true - } - for _, r := range config.NotifyReleaseStages { - if r == config.ReleaseStage { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration_test.go deleted file mode 100644 index 2bd34889..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package bugsnag - -import ( - "testing" -) - -func TestNotifyReleaseStages(t *testing.T) { - - var testCases = []struct { - stage string - configured []string - notify bool - msg string - }{ - { - stage: "production", - notify: true, - msg: "Should notify in all release stages by default", - }, - { - stage: "production", - configured: []string{"development", "production"}, - notify: true, - msg: "Failed to notify in configured release stage", - }, - { - stage: "staging", - configured: []string{"development", "production"}, - notify: false, - msg: "Failed to prevent notification in excluded release stage", - }, - } - - for _, testCase := range testCases { - Configure(Configuration{ReleaseStage: testCase.stage, NotifyReleaseStages: testCase.configured}) - - if Config.notifyInReleaseStage() != testCase.notify { - t.Error(testCase.msg) - } - } -} - -func TestProjectPackages(t *testing.T) { - Configure(Configuration{ProjectPackages: []string{"main", "github.com/ConradIrwin/*"}}) - if !Config.isProjectPackage("main") { - t.Error("literal project package doesn't work") - } - if !Config.isProjectPackage("github.com/ConradIrwin/foo") { - t.Error("wildcard project package doesn't work") - } - if Config.isProjectPackage("runtime") { - t.Error("wrong packges being marked in project") - } - if Config.isProjectPackage("github.com/ConradIrwin/foo/bar") { - t.Error("wrong packges being marked in project") - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/doc.go deleted file mode 100644 index 827e03b8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Package bugsnag captures errors in real-time and reports them to Bugsnag (http://bugsnag.com). - -Using bugsnag-go is a three-step process. - -1. As early as possible in your program configure the notifier with your APIKey. This sets up -handling of panics that would otherwise crash your app. - - func init() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - }) - } - -2. Add bugsnag to places that already catch panics. For example you should add it to the HTTP server -when you call ListenAndServer: - - http.ListenAndServe(":8080", bugsnag.Handler(nil)) - -If that's not possible, for example because you're using Google App Engine, you can also wrap each -HTTP handler manually: - - http.HandleFunc("/" bugsnag.HandlerFunc(func (w http.ResponseWriter, r *http.Request) { - ... - }) - -3. To notify Bugsnag of an error that is not a panic, pass it to bugsnag.Notify. This will also -log the error message using the configured Logger. 
- - if err != nil { - bugsnag.Notify(err) - } - -For detailed integration instructions see https://bugsnag.com/docs/notifiers/go. - -Configuration - -The only required configuration is the Bugsnag API key which can be obtained by clicking "Settings" -on the top of https://bugsnag.com/ after signing up. We also recommend you set the ReleaseStage -and AppVersion if these make sense for your deployment workflow. - -RawData - -If you need to attach extra data to Bugsnag notifications you can do that using -the rawData mechanism. Most of the functions that send errors to Bugsnag allow -you to pass in any number of interface{} values as rawData. The rawData can -consist of the Severity, Context, User or MetaData types listed below, and -there is also builtin support for *http.Requests. - - bugsnag.Notify(err, bugsnag.SeverityError) - -If you want to add custom tabs to your bugsnag dashboard you can pass any value in as rawData, -and then process it into the event's metadata using a bugsnag.OnBeforeNotify() hook. - - bugsnag.Notify(err, account) - - bugsnag.OnBeforeNotify(func (e *bugsnag.Event, c *bugsnag.Configuration) { - for datum := range e.RawData { - if account, ok := datum.(Account); ok { - e.MetaData.Add("account", "name", account.Name) - e.MetaData.Add("account", "url", account.URL) - } - } - }) - -If necessary you can pass Configuration in as rawData, or modify the Configuration object passed -into OnBeforeNotify hooks. Configuration passed in this way only affects the current notification. -*/ -package bugsnag diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/README.md deleted file mode 100644 index 8d8e097a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Adds stacktraces to errors in golang. - -This was made to help build the Bugsnag notifier but can be used standalone if -you like to have stacktraces on errors. - -See [Godoc](https://godoc.org/github.com/bugsnag/bugsnag-go/errors) for the API docs. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error.go deleted file mode 100644 index 0081c0a8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package errors provides errors that have stack-traces. -package errors - -import ( - "bytes" - "fmt" - "reflect" - "runtime" -) - -// The maximum number of stackframes on any error. -var MaxStackDepth = 50 - -// Error is an error with an attached stacktrace. It can be used -// wherever the builtin error interface is expected. -type Error struct { - Err error - stack []uintptr - frames []StackFrame -} - -// New makes an Error from the given value. If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The skip parameter indicates how far up the stack -// to start the stacktrace. 0 is from the current call, 1 from its caller, etc. 
-func New(e interface{}, skip int) *Error { - var err error - - switch e := e.(type) { - case *Error: - return e - case error: - err = e - default: - err = fmt.Errorf("%v", e) - } - - stack := make([]uintptr, MaxStackDepth) - length := runtime.Callers(2+skip, stack[:]) - return &Error{ - Err: err, - stack: stack[:length], - } -} - -// Errorf creates a new error with the given message. You can use it -// as a drop-in replacement for fmt.Errorf() to provide descriptive -// errors in return values. -func Errorf(format string, a ...interface{}) *Error { - return New(fmt.Errorf(format, a...), 1) -} - -// Error returns the underlying error's message. -func (err *Error) Error() string { - return err.Err.Error() -} - -// Stack returns the callstack formatted the same way that go does -// in runtime/debug.Stack() -func (err *Error) Stack() []byte { - buf := bytes.Buffer{} - - for _, frame := range err.StackFrames() { - buf.WriteString(frame.String()) - } - - return buf.Bytes() -} - -// StackFrames returns an array of frames containing information about the -// stack. -func (err *Error) StackFrames() []StackFrame { - if err.frames == nil { - err.frames = make([]StackFrame, len(err.stack)) - - for i, pc := range err.stack { - err.frames[i] = NewStackFrame(pc) - } - } - - return err.frames -} - -// TypeName returns the type this error. e.g. *errors.stringError. -func (err *Error) TypeName() string { - if _, ok := err.Err.(uncaughtPanic); ok { - return "panic" - } - return reflect.TypeOf(err.Err).String() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error_test.go deleted file mode 100644 index 95232ea2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package errors - -import ( - "bytes" - "fmt" - "io" - "runtime/debug" - "testing" -) - -func TestStackFormatMatches(t *testing.T) { - - defer func() { - err := recover() - if err != 'a' { - t.Fatal(err) - } - - bs := [][]byte{Errorf("hi").Stack(), debug.Stack()} - - // Ignore the first line (as it contains the PC of the .Stack() call) - bs[0] = bytes.SplitN(bs[0], []byte("\n"), 2)[1] - bs[1] = bytes.SplitN(bs[1], []byte("\n"), 2)[1] - - if bytes.Compare(bs[0], bs[1]) != 0 { - t.Errorf("Stack didn't match") - t.Errorf("%s", bs[0]) - t.Errorf("%s", bs[1]) - } - }() - - a() -} - -func TestSkipWorks(t *testing.T) { - - defer func() { - err := recover() - if err != 'a' { - t.Fatal(err) - } - - bs := [][]byte{New("hi", 2).Stack(), debug.Stack()} - - // should skip four lines of debug.Stack() - bs[1] = bytes.SplitN(bs[1], []byte("\n"), 5)[4] - - if bytes.Compare(bs[0], bs[1]) != 0 { - t.Errorf("Stack didn't match") - t.Errorf("%s", bs[0]) - t.Errorf("%s", bs[1]) - } - }() - - a() -} - -func TestNewError(t *testing.T) { - - e := func() error { - return New("hi", 1) - }() - - if e.Error() != "hi" { - t.Errorf("Constructor with a string failed") - } - - if New(fmt.Errorf("yo"), 0).Error() != "yo" { - t.Errorf("Constructor with an error failed") - } - - if New(e, 0) != e { - t.Errorf("Constructor with an Error failed") - } - - if New(nil, 0).Error() != "" { - t.Errorf("Constructor with nil failed") - } -} - -func ExampleErrorf(x int) (int, error) { - if x%2 == 1 { - return 0, Errorf("can only halve even numbers, got %d", x) - } - return x / 2, nil -} - -func ExampleNewError() 
(error, error) { - // Wrap io.EOF with the current stack-trace and return it - return nil, New(io.EOF, 0) -} - -func ExampleNewError_skip() { - defer func() { - if err := recover(); err != nil { - // skip 1 frame (the deferred function) and then return the wrapped err - err = New(err, 1) - } - }() -} - -func ExampleError_Stack(err Error) { - fmt.Printf("Error: %s\n%s", err.Error(), err.Stack()) -} - -func a() error { - b(5) - return nil -} - -func b(i int) { - c() -} - -func c() { - panic('a') -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic.go deleted file mode 100644 index cc37052d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic.go +++ /dev/null @@ -1,127 +0,0 @@ -package errors - -import ( - "strconv" - "strings" -) - -type uncaughtPanic struct{ message string } - -func (p uncaughtPanic) Error() string { - return p.message -} - -// ParsePanic allows you to get an error object from the output of a go program -// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap. -func ParsePanic(text string) (*Error, error) { - lines := strings.Split(text, "\n") - - state := "start" - - var message string - var stack []StackFrame - - for i := 0; i < len(lines); i++ { - line := lines[i] - - if state == "start" { - if strings.HasPrefix(line, "panic: ") { - message = strings.TrimPrefix(line, "panic: ") - state = "seek" - } else { - return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line) - } - - } else if state == "seek" { - if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") { - state = "parsing" - } - - } else if state == "parsing" { - if line == "" { - state = "done" - break - } - createdBy := false - if strings.HasPrefix(line, "created by ") { - line = strings.TrimPrefix(line, "created by ") - createdBy = true - } - - i++ - - if i >= len(lines) { - return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line) - } - - frame, err := parsePanicFrame(line, lines[i], createdBy) - if err != nil { - return nil, err - } - - stack = append(stack, *frame) - if createdBy { - state = "done" - break - } - } - } - - if state == "done" || state == "parsing" { - return &Error{Err: uncaughtPanic{message}, frames: stack}, nil - } - return nil, Errorf("could not parse panic: %v", text) -} - -// The lines we're passing look like this: -// -// main.(*foo).destruct(0xc208067e98) -// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151 -func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) { - idx := strings.LastIndex(name, "(") - if idx == -1 && !createdBy { - return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name) - } - if idx != -1 { - name = name[:idx] - } - pkg := "" - - if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - - if !strings.HasPrefix(line, "\t") { - return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line) - } - - idx = strings.LastIndex(line, ":") - if idx == -1 { - return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", 
line) - } - file := line[1:idx] - - number := line[idx+1:] - if idx = strings.Index(number, " +"); idx > -1 { - number = number[:idx] - } - - lno, err := strconv.ParseInt(number, 10, 32) - if err != nil { - return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line) - } - - return &StackFrame{ - File: file, - LineNumber: int(lno), - Package: pkg, - Name: name, - }, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic_test.go deleted file mode 100644 index f9ed7845..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package errors - -import ( - "reflect" - "testing" -) - -var createdBy = `panic: hello! - -goroutine 54 [running]: -runtime.panic(0x35ce40, 0xc208039db0) - /0/c/go/src/pkg/runtime/panic.c:279 +0xf5 -github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.func·001() - /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:13 +0x74 -net/http.(*Server).Serve(0xc20806c780, 0x910c88, 0xc20803e168, 0x0, 0x0) - /0/c/go/src/pkg/net/http/server.go:1698 +0x91 -created by github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.App.Index - /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:14 +0x3e - -goroutine 16 [IO wait]: -net.runtime_pollWait(0x911c30, 0x72, 0x0) - /0/c/go/src/pkg/runtime/netpoll.goc:146 +0x66 -net.(*pollDesc).Wait(0xc2080ba990, 0x72, 0x0, 0x0) - /0/c/go/src/pkg/net/fd_poll_runtime.go:84 +0x46 -net.(*pollDesc).WaitRead(0xc2080ba990, 0x0, 0x0) - /0/c/go/src/pkg/net/fd_poll_runtime.go:89 +0x42 -net.(*netFD).accept(0xc2080ba930, 0x58be30, 0x0, 0x9103f0, 0x23) - /0/c/go/src/pkg/net/fd_unix.go:409 +0x343 -net.(*TCPListener).AcceptTCP(0xc20803e168, 0x8, 0x0, 0x0) - /0/c/go/src/pkg/net/tcpsock_posix.go:234 +0x5d -net.(*TCPListener).Accept(0xc20803e168, 0x0, 0x0, 0x0, 0x0) - /0/c/go/src/pkg/net/tcpsock_posix.go:244 +0x4b -github.com/revel/revel.Run(0xe6d9) - /0/go/src/github.com/revel/revel/server.go:113 +0x926 -main.main() - /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/tmp/main.go:109 +0xe1a -` - -var normalSplit = `panic: hello! 
- -goroutine 54 [running]: -runtime.panic(0x35ce40, 0xc208039db0) - /0/c/go/src/pkg/runtime/panic.c:279 +0xf5 -github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.func·001() - /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:13 +0x74 -net/http.(*Server).Serve(0xc20806c780, 0x910c88, 0xc20803e168, 0x0, 0x0) - /0/c/go/src/pkg/net/http/server.go:1698 +0x91 - -goroutine 16 [IO wait]: -net.runtime_pollWait(0x911c30, 0x72, 0x0) - /0/c/go/src/pkg/runtime/netpoll.goc:146 +0x66 -net.(*pollDesc).Wait(0xc2080ba990, 0x72, 0x0, 0x0) - /0/c/go/src/pkg/net/fd_poll_runtime.go:84 +0x46 -net.(*pollDesc).WaitRead(0xc2080ba990, 0x0, 0x0) - /0/c/go/src/pkg/net/fd_poll_runtime.go:89 +0x42 -net.(*netFD).accept(0xc2080ba930, 0x58be30, 0x0, 0x9103f0, 0x23) - /0/c/go/src/pkg/net/fd_unix.go:409 +0x343 -net.(*TCPListener).AcceptTCP(0xc20803e168, 0x8, 0x0, 0x0) - /0/c/go/src/pkg/net/tcpsock_posix.go:234 +0x5d -net.(*TCPListener).Accept(0xc20803e168, 0x0, 0x0, 0x0, 0x0) - /0/c/go/src/pkg/net/tcpsock_posix.go:244 +0x4b -github.com/revel/revel.Run(0xe6d9) - /0/go/src/github.com/revel/revel/server.go:113 +0x926 -main.main() - /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/tmp/main.go:109 +0xe1a -` - -var lastGoroutine = `panic: hello! - -goroutine 16 [IO wait]: -net.runtime_pollWait(0x911c30, 0x72, 0x0) - /0/c/go/src/pkg/runtime/netpoll.goc:146 +0x66 -net.(*pollDesc).Wait(0xc2080ba990, 0x72, 0x0, 0x0) - /0/c/go/src/pkg/net/fd_poll_runtime.go:84 +0x46 -net.(*pollDesc).WaitRead(0xc2080ba990, 0x0, 0x0) - /0/c/go/src/pkg/net/fd_poll_runtime.go:89 +0x42 -net.(*netFD).accept(0xc2080ba930, 0x58be30, 0x0, 0x9103f0, 0x23) - /0/c/go/src/pkg/net/fd_unix.go:409 +0x343 -net.(*TCPListener).AcceptTCP(0xc20803e168, 0x8, 0x0, 0x0) - /0/c/go/src/pkg/net/tcpsock_posix.go:234 +0x5d -net.(*TCPListener).Accept(0xc20803e168, 0x0, 0x0, 0x0, 0x0) - /0/c/go/src/pkg/net/tcpsock_posix.go:244 +0x4b -github.com/revel/revel.Run(0xe6d9) - /0/go/src/github.com/revel/revel/server.go:113 +0x926 -main.main() - /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/tmp/main.go:109 +0xe1a - -goroutine 54 [running]: -runtime.panic(0x35ce40, 0xc208039db0) - /0/c/go/src/pkg/runtime/panic.c:279 +0xf5 -github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.func·001() - /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:13 +0x74 -net/http.(*Server).Serve(0xc20806c780, 0x910c88, 0xc20803e168, 0x0, 0x0) - /0/c/go/src/pkg/net/http/server.go:1698 +0x91 -` - -var result = []StackFrame{ - StackFrame{File: "/0/c/go/src/pkg/runtime/panic.c", LineNumber: 279, Name: "panic", Package: "runtime"}, - StackFrame{File: "/0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go", LineNumber: 13, Name: "func.001", Package: "github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers"}, - StackFrame{File: "/0/c/go/src/pkg/net/http/server.go", LineNumber: 1698, Name: "(*Server).Serve", Package: "net/http"}, -} - -var resultCreatedBy = append(result, - StackFrame{File: "/0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go", LineNumber: 14, Name: "App.Index", Package: "github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers", ProgramCounter: 0x0}) - -func TestParsePanic(t *testing.T) { - - todo := map[string]string{ - "createdBy": createdBy, - "normalSplit": normalSplit, - "lastGoroutine": lastGoroutine, - } - - for key, val := range todo { - Err, err := ParsePanic(val) - - if err != nil { - 
t.Fatal(err) - } - - if Err.TypeName() != "panic" { - t.Errorf("Wrong type: %s", Err.TypeName()) - } - - if Err.Error() != "hello!" { - t.Errorf("Wrong message: %s", Err.TypeName()) - } - - if Err.StackFrames()[0].Func() != nil { - t.Errorf("Somehow managed to find a func...") - } - - result := result - if key == "createdBy" { - result = resultCreatedBy - } - - if !reflect.DeepEqual(Err.StackFrames(), result) { - t.Errorf("Wrong stack for %s: %#v", key, Err.StackFrames()) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/stackframe.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/stackframe.go deleted file mode 100644 index 4edadbc5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/stackframe.go +++ /dev/null @@ -1,97 +0,0 @@ -package errors - -import ( - "bytes" - "fmt" - "io/ioutil" - "runtime" - "strings" -) - -// A StackFrame contains all necessary information about to generate a line -// in a callstack. -type StackFrame struct { - File string - LineNumber int - Name string - Package string - ProgramCounter uintptr -} - -// NewStackFrame popoulates a stack frame object from the program counter. -func NewStackFrame(pc uintptr) (frame StackFrame) { - - frame = StackFrame{ProgramCounter: pc} - if frame.Func() == nil { - return - } - frame.Package, frame.Name = packageAndName(frame.Func()) - - // pc -1 because the program counters we use are usually return addresses, - // and we want to show the line that corresponds to the function call - frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1) - return - -} - -// Func returns the function that this stackframe corresponds to -func (frame *StackFrame) Func() *runtime.Func { - if frame.ProgramCounter == 0 { - return nil - } - return runtime.FuncForPC(frame.ProgramCounter) -} - -// String returns the stackframe formatted in the same way as go does -// in runtime/debug.Stack() -func (frame *StackFrame) String() string { - str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter) - - source, err := frame.SourceLine() - if err != nil { - return str - } - - return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source) -} - -// SourceLine gets the line of code (from File and Line) of the original source if possible -func (frame *StackFrame) SourceLine() (string, error) { - data, err := ioutil.ReadFile(frame.File) - - if err != nil { - return "", err - } - - lines := bytes.Split(data, []byte{'\n'}) - if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) { - return "???", nil - } - // -1 because line-numbers are 1 based, but our array is 0 based - return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil -} - -func packageAndName(fn *runtime.Func) (string, string) { - name := fn.Name() - pkg := "" - - // The name includes the path name to the package, which is unnecessary - // since the file name is already included. Plus, it has center dots. - // That is, we see - // runtime/debug.*T·ptrmethod - // and want - // *T.ptrmethod - // Since the package path might contains dots (e.g. code.google.com/...), - // we first remove the path prefix if there is one. 
- if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - return pkg, name -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/event.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/event.go deleted file mode 100644 index 1586ef3f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/event.go +++ /dev/null @@ -1,134 +0,0 @@ -package bugsnag - -import ( - "strings" - - "github.com/bugsnag/bugsnag-go/errors" -) - -// Context is the context of the error in Bugsnag. -// This can be passed to Notify, Recover or AutoNotify as rawData. -type Context struct { - String string -} - -// User represents the searchable user-data on Bugsnag. The Id is also used -// to determine the number of users affected by a bug. This can be -// passed to Notify, Recover or AutoNotify as rawData. -type User struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Email string `json:"email,omitempty"` -} - -// Sets the severity of the error on Bugsnag. These values can be -// passed to Notify, Recover or AutoNotify as rawData. -var ( - SeverityError = severity{"error"} - SeverityWarning = severity{"warning"} - SeverityInfo = severity{"info"} -) - -// The severity tag type, private so that people can only use Error,Warning,Info -type severity struct { - String string -} - -// The form of stacktrace that Bugsnag expects -type stackFrame struct { - Method string `json:"method"` - File string `json:"file"` - LineNumber int `json:"lineNumber"` - InProject bool `json:"inProject,omitempty"` -} - -// Event represents a payload of data that gets sent to Bugsnag. -// This is passed to each OnBeforeNotify hook. -type Event struct { - - // The original error that caused this event, not sent to Bugsnag. - Error *errors.Error - - // The rawData affecting this error, not sent to Bugsnag. - RawData []interface{} - - // The error class to be sent to Bugsnag. This defaults to the type name of the Error, for - // example *error.String - ErrorClass string - // The error message to be sent to Bugsnag. This defaults to the return value of Error.Error() - Message string - // The stacktrrace of the error to be sent to Bugsnag. - Stacktrace []stackFrame - - // The context to be sent to Bugsnag. This should be set to the part of the app that was running, - // e.g. for http requests, set it to the path. - Context string - // The severity of the error. Can be SeverityError, SeverityWarning or SeverityInfo. - Severity severity - // The grouping hash is used to override Bugsnag's grouping. Set this if you'd like all errors with - // the same grouping hash to group together in the dashboard. - GroupingHash string - - // User data to send to Bugsnag. This is searchable on the dashboard. - User *User - // Other MetaData to send to Bugsnag. Appears as a set of tabbed tables in the dashboard. 
- MetaData MetaData -} - -func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Event, *Configuration) { - - config := notifier.Config - event := &Event{ - Error: err, - RawData: append(notifier.RawData, rawData...), - - ErrorClass: err.TypeName(), - Message: err.Error(), - Stacktrace: make([]stackFrame, len(err.StackFrames())), - - Severity: SeverityWarning, - - MetaData: make(MetaData), - } - - for _, datum := range event.RawData { - switch datum := datum.(type) { - case severity: - event.Severity = datum - - case Context: - event.Context = datum.String - - case Configuration: - config = config.merge(&datum) - - case MetaData: - event.MetaData.Update(datum) - - case User: - event.User = &datum - } - } - - for i, frame := range err.StackFrames() { - file := frame.File - inProject := config.isProjectPackage(frame.Package) - - // remove $GOROOT and $GOHOME from other frames - if idx := strings.Index(file, frame.Package); idx > -1 { - file = file[idx:] - } - if inProject { - file = config.stripProjectPackages(file) - } - - event.Stacktrace[i] = stackFrame{ - Method: frame.Name, - File: file, - LineNumber: frame.LineNumber, - InProject: inProject, - } - } - - return event, config -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/json_tags.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/json_tags.go deleted file mode 100644 index 45be38fa..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/json_tags.go +++ /dev/null @@ -1,43 +0,0 @@ -// The code is stripped from: -// http://golang.org/src/pkg/encoding/json/tags.go?m=text - -package bugsnag - -import ( - "strings" -) - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata.go deleted file mode 100644 index ffe64e21..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata.go +++ /dev/null @@ -1,185 +0,0 @@ -package bugsnag - -import ( - "fmt" - "reflect" - "strings" -) - -// MetaData is added to the Bugsnag dashboard in tabs. Each tab is -// a map of strings -> values. You can pass MetaData to Notify, Recover -// and AutoNotify as rawData. -type MetaData map[string]map[string]interface{} - -// Update the meta-data with more information. 
Tabs are merged together such -// that unique keys from both sides are preserved, and duplicate keys end up -// with the provided values. -func (meta MetaData) Update(other MetaData) { - for name, tab := range other { - - if meta[name] == nil { - meta[name] = make(map[string]interface{}) - } - - for key, value := range tab { - meta[name][key] = value - } - } -} - -// Add creates a tab of Bugsnag meta-data. -// If the tab doesn't yet exist it will be created. -// If the key already exists, it will be overwritten. -func (meta MetaData) Add(tab string, key string, value interface{}) { - if meta[tab] == nil { - meta[tab] = make(map[string]interface{}) - } - - meta[tab][key] = value -} - -// AddStruct creates a tab of Bugsnag meta-data. -// The struct will be converted to an Object using the -// reflect library so any private fields will not be exported. -// As a safety measure, if you pass a non-struct the value will be -// sent to Bugsnag under the "Extra data" tab. -func (meta MetaData) AddStruct(tab string, obj interface{}) { - val := sanitizer{}.Sanitize(obj) - content, ok := val.(map[string]interface{}) - if ok { - meta[tab] = content - } else { - // Wasn't a struct - meta.Add("Extra data", tab, obj) - } - -} - -// Remove any values from meta-data that have keys matching the filters, -// and any that are recursive data-structures -func (meta MetaData) sanitize(filters []string) interface{} { - return sanitizer{ - Filters: filters, - Seen: make([]interface{}, 0), - }.Sanitize(meta) - -} - -// The sanitizer is used to remove filtered params and recursion from meta-data. -type sanitizer struct { - Filters []string - Seen []interface{} -} - -func (s sanitizer) Sanitize(data interface{}) interface{} { - for _, s := range s.Seen { - // TODO: we don't need deep equal here, just type-ignoring equality - if reflect.DeepEqual(data, s) { - return "[RECURSION]" - } - } - - // Sanitizers are passed by value, so we can modify s and it only affects - // s.Seen for nested calls. 
- s.Seen = append(s.Seen, data) - - t := reflect.TypeOf(data) - v := reflect.ValueOf(data) - - switch t.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64: - return data - - case reflect.String: - return data - - case reflect.Interface, reflect.Ptr: - return s.Sanitize(v.Elem().Interface()) - - case reflect.Array, reflect.Slice: - ret := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - ret[i] = s.Sanitize(v.Index(i).Interface()) - } - return ret - - case reflect.Map: - return s.sanitizeMap(v) - - case reflect.Struct: - return s.sanitizeStruct(v, t) - - // Things JSON can't serialize: - // case t.Chan, t.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: - default: - return "[" + t.String() + "]" - - } - -} - -func (s sanitizer) sanitizeMap(v reflect.Value) interface{} { - ret := make(map[string]interface{}) - - for _, key := range v.MapKeys() { - val := s.Sanitize(v.MapIndex(key).Interface()) - newKey := fmt.Sprintf("%v", key.Interface()) - - if s.shouldRedact(newKey) { - val = "[REDACTED]" - } - - ret[newKey] = val - } - - return ret -} - -func (s sanitizer) sanitizeStruct(v reflect.Value, t reflect.Type) interface{} { - ret := make(map[string]interface{}) - - for i := 0; i < v.NumField(); i++ { - - val := v.Field(i) - // Don't export private fields - if !val.CanInterface() { - continue - } - - name := t.Field(i).Name - var opts tagOptions - - // Parse JSON tags. Supports name and "omitempty" - if jsonTag := t.Field(i).Tag.Get("json"); len(jsonTag) != 0 { - name, opts = parseTag(jsonTag) - } - - if s.shouldRedact(name) { - ret[name] = "[REDACTED]" - } else { - sanitized := s.Sanitize(val.Interface()) - if str, ok := sanitized.(string); ok { - if !(opts.Contains("omitempty") && len(str) == 0) { - ret[name] = str - } - } else { - ret[name] = sanitized - } - - } - } - - return ret -} - -func (s sanitizer) shouldRedact(key string) bool { - for _, filter := range s.Filters { - if strings.Contains(strings.ToLower(filter), strings.ToLower(key)) { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata_test.go deleted file mode 100644 index 37bfaee5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package bugsnag - -import ( - "reflect" - "testing" - "unsafe" - - "github.com/bugsnag/bugsnag-go/errors" -) - -type _account struct { - ID string - Name string - Plan struct { - Premium bool - } - Password string - secret string - Email string `json:"email"` - EmptyEmail string `json:"emptyemail,omitempty"` - NotEmptyEmail string `json:"not_empty_email,omitempty"` -} - -type _broken struct { - Me *_broken - Data string -} - -var account = _account{} -var notifier = New(Configuration{}) - -func TestMetaDataAdd(t *testing.T) { - m := MetaData{ - "one": { - "key": "value", - "override": false, - }} - - m.Add("one", "override", true) - m.Add("one", "new", "key") - m.Add("new", "tab", account) - - m.AddStruct("lol", "not really a struct") - m.AddStruct("account", account) - - if !reflect.DeepEqual(m, MetaData{ - "one": { - "key": "value", - "override": true, - "new": "key", - }, - "new": { 
- "tab": account, - }, - "Extra data": { - "lol": "not really a struct", - }, - "account": { - "ID": "", - "Name": "", - "Plan": map[string]interface{}{ - "Premium": false, - }, - "Password": "", - "email": "", - }, - }) { - t.Errorf("metadata.Add didn't work: %#v", m) - } -} - -func TestMetaDataUpdate(t *testing.T) { - - m := MetaData{ - "one": { - "key": "value", - "override": false, - }} - - m.Update(MetaData{ - "one": { - "override": true, - "new": "key", - }, - "new": { - "tab": account, - }, - }) - - if !reflect.DeepEqual(m, MetaData{ - "one": { - "key": "value", - "override": true, - "new": "key", - }, - "new": { - "tab": account, - }, - }) { - t.Errorf("metadata.Update didn't work: %#v", m) - } -} - -func TestMetaDataSanitize(t *testing.T) { - - var broken = _broken{} - broken.Me = &broken - broken.Data = "ohai" - account.Name = "test" - account.ID = "test" - account.secret = "hush" - account.Email = "example@example.com" - account.EmptyEmail = "" - account.NotEmptyEmail = "not_empty_email@example.com" - - m := MetaData{ - "one": { - "bool": true, - "int": 7, - "float": 7.1, - "complex": complex(1, 1), - "func": func() {}, - "unsafe": unsafe.Pointer(broken.Me), - "string": "string", - "password": "secret", - "array": []hash{{ - "creditcard": "1234567812345678", - "broken": broken, - }}, - "broken": broken, - "account": account, - }, - } - - n := m.sanitize([]string{"password", "creditcard"}) - - if !reflect.DeepEqual(n, map[string]interface{}{ - "one": map[string]interface{}{ - "bool": true, - "int": 7, - "float": 7.1, - "complex": "[complex128]", - "string": "string", - "unsafe": "[unsafe.Pointer]", - "func": "[func()]", - "password": "[REDACTED]", - "array": []interface{}{map[string]interface{}{ - "creditcard": "[REDACTED]", - "broken": map[string]interface{}{ - "Me": "[RECURSION]", - "Data": "ohai", - }, - }}, - "broken": map[string]interface{}{ - "Me": "[RECURSION]", - "Data": "ohai", - }, - "account": map[string]interface{}{ - "ID": "test", - "Name": "test", - "Plan": map[string]interface{}{ - "Premium": false, - }, - "Password": "[REDACTED]", - "email": "example@example.com", - "not_empty_email": "not_empty_email@example.com", - }, - }, - }) { - t.Errorf("metadata.Sanitize didn't work: %#v", n) - } - -} - -func ExampleMetaData() { - notifier.Notify(errors.Errorf("hi world"), - MetaData{"Account": { - "id": account.ID, - "name": account.Name, - "paying?": account.Plan.Premium, - }}) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware.go deleted file mode 100644 index 266d5e46..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware.go +++ /dev/null @@ -1,96 +0,0 @@ -package bugsnag - -import ( - "net/http" - "strings" -) - -type ( - beforeFunc func(*Event, *Configuration) error - - // MiddlewareStacks keep middleware in the correct order. They are - // called in reverse order, so if you add a new middleware it will - // be called before all existing middleware. - middlewareStack struct { - before []beforeFunc - } -) - -// AddMiddleware adds a new middleware to the outside of the existing ones, -// when the middlewareStack is Run it will be run before all middleware that -// have been added before. 
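// A sketch of the resulting call order, assuming three hypothetical
// handlers a, b and c: Run walks the slice in reverse, so the most
// recently registered middleware runs first.
//
//     stack.OnBeforeNotify(a) // registered first, runs last
//     stack.OnBeforeNotify(b)
//     stack.OnBeforeNotify(c) // registered last, runs first
//     stack.Run(event, config, next) // calls c, b, a, then next()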
-func (stack *middlewareStack) OnBeforeNotify(middleware beforeFunc) { - stack.before = append(stack.before, middleware) -} - -// Run causes all the middleware to be run. If they all permit it the next callback -// will be called with all the middleware on the stack. -func (stack *middlewareStack) Run(event *Event, config *Configuration, next func() error) error { - // run all the before filters in reverse order - for i := range stack.before { - before := stack.before[len(stack.before)-i-1] - - err := stack.runBeforeFilter(before, event, config) - if err != nil { - return err - } - } - - return next() -} - -func (stack *middlewareStack) runBeforeFilter(f beforeFunc, event *Event, config *Configuration) error { - defer func() { - if err := recover(); err != nil { - config.log("bugsnag/middleware: unexpected panic: %v", err) - } - }() - - return f(event, config) -} - -// catchMiddlewarePanic is used to log any panics that happen inside Middleware, -// we wouldn't want to not notify Bugsnag in this case. -func catchMiddlewarePanic(event *Event, config *Configuration, next func() error) { -} - -// httpRequestMiddleware is added OnBeforeNotify by default. It takes information -// from an http.Request passed in as rawData, and adds it to the Event. You can -// use this as a template for writing your own Middleware. -func httpRequestMiddleware(event *Event, config *Configuration) error { - for _, datum := range event.RawData { - if request, ok := datum.(*http.Request); ok { - proto := "http://" - if request.TLS != nil { - proto = "https://" - } - - event.MetaData.Update(MetaData{ - "Request": { - "RemoteAddr": request.RemoteAddr, - "Method": request.Method, - "Url": proto + request.Host + request.RequestURI, - "Params": request.URL.Query(), - }, - }) - - // Add headers as a separate tab. - event.MetaData.AddStruct("Headers", request.Header) - - // Default context to Path - if event.Context == "" { - event.Context = request.URL.Path - } - - // Default user.id to IP so that users-affected works. 
- if event.User == nil { - ip := request.RemoteAddr - if idx := strings.LastIndex(ip, ":"); idx != -1 { - ip = ip[:idx] - } - event.User = &User{Id: ip} - } - } - } - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware_test.go deleted file mode 100644 index b1ef77a8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package bugsnag - -import ( - "bytes" - "fmt" - "log" - "reflect" - "testing" -) - -func TestMiddlewareOrder(t *testing.T) { - - result := make([]int, 0, 7) - stack := middlewareStack{} - stack.OnBeforeNotify(func(e *Event, c *Configuration) error { - result = append(result, 2) - return nil - }) - stack.OnBeforeNotify(func(e *Event, c *Configuration) error { - result = append(result, 1) - return nil - }) - stack.OnBeforeNotify(func(e *Event, c *Configuration) error { - result = append(result, 0) - return nil - }) - - stack.Run(nil, nil, func() error { - result = append(result, 3) - return nil - }) - - if !reflect.DeepEqual(result, []int{0, 1, 2, 3}) { - t.Errorf("unexpected middleware order %v", result) - } -} - -func TestBeforeNotifyReturnErr(t *testing.T) { - - stack := middlewareStack{} - err := fmt.Errorf("test") - - stack.OnBeforeNotify(func(e *Event, c *Configuration) error { - return err - }) - - called := false - - e := stack.Run(nil, nil, func() error { - called = true - return nil - }) - - if e != err { - t.Errorf("Middleware didn't return the error") - } - - if called == true { - t.Errorf("Notify was called when BeforeNotify returned False") - } -} - -func TestBeforeNotifyPanic(t *testing.T) { - - stack := middlewareStack{} - - stack.OnBeforeNotify(func(e *Event, c *Configuration) error { - panic("oops") - }) - - called := false - b := &bytes.Buffer{} - - stack.Run(nil, &Configuration{Logger: log.New(b, log.Prefix(), 0)}, func() error { - called = true - return nil - }) - - logged := b.String() - - if logged != "bugsnag/middleware: unexpected panic: oops\n" { - t.Errorf("Logged: %s", logged) - } - - if called == false { - t.Errorf("Notify was not called when BeforeNotify panicked") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/notifier.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/notifier.go deleted file mode 100644 index 6b108178..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/notifier.go +++ /dev/null @@ -1,95 +0,0 @@ -package bugsnag - -import ( - "fmt" - - "github.com/bugsnag/bugsnag-go/errors" -) - -// Notifier sends errors to Bugsnag. -type Notifier struct { - Config *Configuration - RawData []interface{} -} - -// New creates a new notifier. -// You can pass an instance of bugsnag.Configuration in rawData to change the configuration. -// Other values of rawData will be passed to Notify. -func New(rawData ...interface{}) *Notifier { - config := Config.clone() - for i, datum := range rawData { - if c, ok := datum.(Configuration); ok { - config.update(&c) - rawData[i] = nil - } - } - - return &Notifier{ - Config: config, - RawData: rawData, - } -} - -// Notify sends an error to Bugsnag. Any rawData you pass here will be sent to -// Bugsnag after being converted to JSON. e.g. 
bugsnag.SeverityError, bugsnag.Context, -// or bugsnag.MetaData. -func (notifier *Notifier) Notify(err error, rawData ...interface{}) (e error) { - event, config := newEvent(errors.New(err, 1), rawData, notifier) - - // Never block, start throwing away errors if we have too many. - e = middleware.Run(event, config, func() error { - config.log("notifying bugsnag: %s", event.Message) - if config.notifyInReleaseStage() { - if config.Synchronous { - return (&payload{event, config}).deliver() - } - go (&payload{event, config}).deliver() - return nil - } - return fmt.Errorf("not notifying in %s", config.ReleaseStage) - }) - - if e != nil { - config.log("bugsnag.Notify: %v", e) - } - return e -} - -// AutoNotify notifies Bugsnag of any panics, then repanics. -// It sends along any rawData that gets passed in. -// Usage: defer AutoNotify() -func (notifier *Notifier) AutoNotify(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = notifier.addDefaultSeverity(rawData, SeverityError) - notifier.Notify(errors.New(err, 2), rawData...) - panic(err) - } -} - -// Recover logs any panics, then recovers. -// It sends along any rawData that gets passed in. -// Usage: defer Recover() -func (notifier *Notifier) Recover(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = notifier.addDefaultSeverity(rawData, SeverityWarning) - notifier.Notify(errors.New(err, 2), rawData...) - } -} - -func (notifier *Notifier) dontPanic() { - if err := recover(); err != nil { - notifier.Config.log("bugsnag/notifier.Notify: panic! %s", err) - } -} - -// Add a severity to raw data only if the default is not set. -func (notifier *Notifier) addDefaultSeverity(rawData []interface{}, s severity) []interface{} { - - for _, datum := range append(notifier.RawData, rawData...) { - if _, ok := datum.(severity); ok { - return rawData - } - } - - return append(rawData, s) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap.go deleted file mode 100644 index 14fb9fa8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !appengine - -package bugsnag - -import ( - "github.com/bugsnag/panicwrap" - "github.com/bugsnag/bugsnag-go/errors" -) - -// NOTE: this function does not return when you call it, instead it -// re-exec()s the current process with panic monitoring. 
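// A rough sketch of the control flow this sets up, based on the
// panicwrap.BasicMonitor contract described in that package's docs:
//
//     defaultPanicHandler()
//       └─ panicwrap.BasicMonitor(handler)
//            ├─ monitoring process: never returns; on a detected panic,
//            │  handler parses the output with errors.ParsePanic and
//            │  forwards it via Notify(..., Configuration{Synchronous: true})
//            └─ application process: BasicMonitor returns nil and the
//               program continues as normal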
-func defaultPanicHandler() { - defer defaultNotifier.dontPanic() - - err := panicwrap.BasicMonitor(func(output string) { - toNotify, err := errors.ParsePanic(output) - - if err != nil { - defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err) - } - Notify(toNotify, SeverityError, Configuration{Synchronous: true}) - }) - - if err != nil { - defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap_test.go deleted file mode 100644 index 247c3f45..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// +build !appengine - -package bugsnag - -import ( - "github.com/bitly/go-simplejson" - "github.com/mitchellh/osext" - "os" - "os/exec" - "testing" - "time" -) - -func TestPanicHandler(t *testing.T) { - startTestServer() - - exePath, err := osext.Executable() - if err != nil { - t.Fatal(err) - } - - // Use the same trick as panicwrap() to re-run ourselves. - // In the init() block below, we will then panic. - cmd := exec.Command(exePath, os.Args[1:]...) - cmd.Env = append(os.Environ(), "BUGSNAG_API_KEY="+testAPIKey, "BUGSNAG_ENDPOINT="+testEndpoint, "please_panic=please_panic") - - if err = cmd.Start(); err != nil { - t.Fatal(err) - } - - if err = cmd.Wait(); err.Error() != "exit status 2" { - t.Fatal(err) - } - - json, err := simplejson.NewJson(<-postedJSON) - if err != nil { - t.Fatal(err) - } - - event := json.Get("events").GetIndex(0) - - if event.Get("severity").MustString() != "error" { - t.Errorf("severity should be error") - } - exception := event.Get("exceptions").GetIndex(0) - - if exception.Get("message").MustString() != "ruh roh" { - t.Errorf("caught wrong panic") - } - - if exception.Get("errorClass").MustString() != "panic" { - t.Errorf("caught wrong panic") - } - - frame := exception.Get("stacktrace").GetIndex(1) - - // Yeah, we just caught a panic from the init() function below and sent it to the server running above (mindblown) - if frame.Get("inProject").MustBool() != true || - frame.Get("file").MustString() != "panicwrap_test.go" || - frame.Get("method").MustString() != "panick" || - frame.Get("lineNumber").MustInt() == 0 { - t.Errorf("stack trace seemed wrong") - } -} - -func init() { - if os.Getenv("please_panic") != "" { - Configure(Configuration{APIKey: os.Getenv("BUGSNAG_API_KEY"), Endpoint: os.Getenv("BUGSNAG_ENDPOINT"), ProjectPackages: []string{"github.com/bugsnag/bugsnag-go"}}) - go func() { - panick() - }() - // Plenty of time to crash, it shouldn't need any of it. 
- time.Sleep(1 * time.Second) - } -} - -func panick() { - panic("ruh roh") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/payload.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/payload.go deleted file mode 100644 index a516a5d2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/payload.go +++ /dev/null @@ -1,96 +0,0 @@ -package bugsnag - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" -) - -type payload struct { - *Event - *Configuration -} - -type hash map[string]interface{} - -func (p *payload) deliver() error { - - if len(p.APIKey) != 32 { - return fmt.Errorf("bugsnag/payload.deliver: invalid api key") - } - - buf, err := json.Marshal(p) - - if err != nil { - return fmt.Errorf("bugsnag/payload.deliver: %v", err) - } - - client := http.Client{ - Transport: p.Transport, - } - - resp, err := client.Post(p.Endpoint, "application/json", bytes.NewBuffer(buf)) - - if err != nil { - return fmt.Errorf("bugsnag/payload.deliver: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("bugsnag/payload.deliver: Got HTTP %s\n", resp.Status) - } - - return nil -} - -func (p *payload) MarshalJSON() ([]byte, error) { - - data := hash{ - "apiKey": p.APIKey, - - "notifier": hash{ - "name": "Bugsnag Go", - "url": "https://github.com/bugsnag/bugsnag-go", - "version": VERSION, - }, - - "events": []hash{ - { - "payloadVersion": "2", - "exceptions": []hash{ - { - "errorClass": p.ErrorClass, - "message": p.Message, - "stacktrace": p.Stacktrace, - }, - }, - "severity": p.Severity.String, - "app": hash{ - "releaseStage": p.ReleaseStage, - }, - "user": p.User, - "metaData": p.MetaData.sanitize(p.ParamsFilters), - }, - }, - } - - event := data["events"].([]hash)[0] - - if p.Context != "" { - event["context"] = p.Context - } - if p.GroupingHash != "" { - event["groupingHash"] = p.GroupingHash - } - if p.Hostname != "" { - event["device"] = hash{ - "hostname": p.Hostname, - } - } - if p.AppVersion != "" { - event["app"].(hash)["version"] = p.AppVersion - } - return json.Marshal(data) - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/revel/bugsnagrevel.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/revel/bugsnagrevel.go deleted file mode 100644 index 149b010c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/revel/bugsnagrevel.go +++ /dev/null @@ -1,60 +0,0 @@ -// Package bugsnagrevel adds Bugsnag to revel. -// It lets you pass *revel.Controller into bugsnag.Notify(), -// and provides a Filter to catch errors. -package bugsnagrevel - -import ( - "strings" - "sync" - - "github.com/bugsnag/bugsnag-go" - "github.com/revel/revel" -) - -var once sync.Once - -// Filter should be added to the filter chain just after the PanicFilter. -// It sends errors to Bugsnag automatically. Configuration is read out of -// conf/app.conf, you should set bugsnag.apikey, and can also set -// bugsnag.endpoint, bugsnag.releasestage, bugsnag.appversion, -// bugsnag.projectroot, bugsnag.projectpackages if needed. 
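// A minimal sketch of wiring this into a revel app's filter chain
// (illustrative; the neighbouring entries follow revel's conventional
// app/init.go setup):
//
//     revel.Filters = []revel.Filter{
//         revel.PanicFilter,
//         bugsnagrevel.Filter, // just after the PanicFilter, as noted above
//         revel.RouterFilter,
//         // ... remaining filters ...
//     }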
-func Filter(c *revel.Controller, fc []revel.Filter) { - defer bugsnag.AutoNotify(c) - fc[0](c, fc[1:]) -} - -// Add support to bugsnag for reading data out of *revel.Controllers -func middleware(event *bugsnag.Event, config *bugsnag.Configuration) error { - for _, datum := range event.RawData { - if controller, ok := datum.(*revel.Controller); ok { - // make the request visible to the builtin HttpMIddleware - event.RawData = append(event.RawData, controller.Request.Request) - event.Context = controller.Action - event.MetaData.AddStruct("Session", controller.Session) - } - } - - return nil -} - -func init() { - revel.OnAppStart(func() { - bugsnag.OnBeforeNotify(middleware) - - var projectPackages []string - if packages, ok := revel.Config.String("bugsnag.projectpackages"); ok { - projectPackages = strings.Split(packages, ",") - } else { - projectPackages = []string{revel.ImportPath + "/app/*", revel.ImportPath + "/app"} - } - - bugsnag.Configure(bugsnag.Configuration{ - APIKey: revel.Config.StringDefault("bugsnag.apikey", ""), - Endpoint: revel.Config.StringDefault("bugsnag.endpoint", ""), - AppVersion: revel.Config.StringDefault("bugsnag.appversion", ""), - ReleaseStage: revel.Config.StringDefault("bugsnag.releasestage", revel.RunMode), - ProjectPackages: projectPackages, - Logger: revel.ERROR, - }) - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/LICENSE deleted file mode 100644 index 18527a28..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2012 Daniel Theophanes - -This software is provided 'as-is', without any express or implied -warranty. In no event will the authors be held liable for any damages -arising from the use of this software. - -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it -freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - - 3. This notice may not be removed or altered from any source - distribution. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext.go deleted file mode 100644 index 37efbb22..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Extensions to the standard "os" package. -package osext - -import "path/filepath" - -// Executable returns an absolute path that can be used to -// re-invoke the current program. -// It may not be valid after the current program exits. -func Executable() (string, error) { - p, err := executable() - return filepath.Clean(p), err -} - -// Returns same path as Executable, returns just the folder -// path. 
Excludes the executable name. -func ExecutableFolder() (string, error) { - p, err := Executable() - if err != nil { - return "", err - } - folder, _ := filepath.Split(p) - return folder, nil -} - -// Depricated. Same as Executable(). -func GetExePath() (exePath string, err error) { - return Executable() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_plan9.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_plan9.go deleted file mode 100644 index e88c1e09..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_plan9.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package osext - -import "syscall" - -func executable() (string, error) { - f, err := Open("/proc/" + itoa(Getpid()) + "/text") - if err != nil { - return "", err - } - defer f.Close() - return syscall.Fd2path(int(f.Fd())) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_procfs.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_procfs.go deleted file mode 100644 index 546fec91..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_procfs.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux netbsd openbsd - -package osext - -import ( - "errors" - "os" - "runtime" -) - -func executable() (string, error) { - switch runtime.GOOS { - case "linux": - return os.Readlink("/proc/self/exe") - case "netbsd": - return os.Readlink("/proc/curproc/exe") - case "openbsd": - return os.Readlink("/proc/curproc/file") - } - return "", errors.New("ExecPath not implemented for " + runtime.GOOS) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_sysctl.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_sysctl.go deleted file mode 100644 index d7646462..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_sysctl.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
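// A small caller-side sketch of the package API defined above
// (illustrative; assumes the standard fmt and log imports):
//
//     exe, err := osext.Executable()
//     if err != nil {
//         log.Fatal(err)
//     }
//     dir, _ := osext.ExecutableFolder()
//     fmt.Println(exe, dir)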
- -// +build darwin freebsd - -package osext - -import ( - "os" - "runtime" - "syscall" - "unsafe" -) - -var startUpcwd, getwdError = os.Getwd() - -func executable() (string, error) { - var mib [4]int32 - switch runtime.GOOS { - case "freebsd": - mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} - case "darwin": - mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} - } - - n := uintptr(0) - // get length - _, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if err != 0 { - return "", err - } - if n == 0 { // shouldn't happen - return "", nil - } - buf := make([]byte, n) - _, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) - if err != 0 { - return "", err - } - if n == 0 { // shouldn't happen - return "", nil - } - for i, v := range buf { - if v == 0 { - buf = buf[:i] - break - } - } - if buf[0] != '/' { - if getwdError != nil { - return string(buf), getwdError - } else { - if buf[0] == '.' { - buf = buf[1:] - } - if startUpcwd[len(startUpcwd)-1] != '/' { - return startUpcwd + "/" + string(buf), nil - } - return startUpcwd + string(buf), nil - } - } - return string(buf), nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_test.go deleted file mode 100644 index dc661dbc..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin linux freebsd netbsd windows - -package osext - -import ( - "fmt" - "os" - oexec "os/exec" - "path/filepath" - "runtime" - "testing" -) - -const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH" - -func TestExecPath(t *testing.T) { - ep, err := Executable() - if err != nil { - t.Fatalf("ExecPath failed: %v", err) - } - // we want fn to be of the form "dir/prog" - dir := filepath.Dir(filepath.Dir(ep)) - fn, err := filepath.Rel(dir, ep) - if err != nil { - t.Fatalf("filepath.Rel: %v", err) - } - cmd := &oexec.Cmd{} - // make child start with a relative program path - cmd.Dir = dir - cmd.Path = fn - // forge argv[0] for child, so that we can verify we could correctly - // get real path of the executable without influenced by argv[0]. 
- cmd.Args = []string{"-", "-test.run=XXXX"} - cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)} - out, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("exec(self) failed: %v", err) - } - outs := string(out) - if !filepath.IsAbs(outs) { - t.Fatalf("Child returned %q, want an absolute path", out) - } - if !sameFile(outs, ep) { - t.Fatalf("Child returned %q, not the same file as %q", out, ep) - } -} - -func sameFile(fn1, fn2 string) bool { - fi1, err := os.Stat(fn1) - if err != nil { - return false - } - fi2, err := os.Stat(fn2) - if err != nil { - return false - } - return os.SameFile(fi1, fi2) -} - -func init() { - if e := os.Getenv(execPath_EnvVar); e != "" { - // first chdir to another path - dir := "/" - if runtime.GOOS == "windows" { - dir = filepath.VolumeName(".") - } - os.Chdir(dir) - if ep, err := Executable(); err != nil { - fmt.Fprint(os.Stderr, "ERROR: ", err) - } else { - fmt.Fprint(os.Stderr, ep) - } - os.Exit(0) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_windows.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_windows.go deleted file mode 100644 index 72d282cf..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/osext/osext_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package osext - -import ( - "syscall" - "unicode/utf16" - "unsafe" -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") -) - -// GetModuleFileName() with hModule = NULL -func executable() (exePath string, err error) { - return getModuleFileName() -} - -func getModuleFileName() (string, error) { - var n uint32 - b := make([]uint16, syscall.MAX_PATH) - size := uint32(len(b)) - - r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) - n = uint32(r0) - if n == 0 { - return "", e1 - } - return string(utf16.Decode(b[0:n])), nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/LICENSE deleted file mode 100644 index f9c841a5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/README.md deleted file mode 100644 index d0a59675..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# panicwrap - -panicwrap is a Go library that re-executes a Go binary and monitors stderr -output from the binary for a panic. When it find a panic, it executes a -user-defined handler function. Stdout, stderr, stdin, signals, and exit -codes continue to work as normal, making the existence of panicwrap mostly -invisble to the end user until a panic actually occurs. - -Since a panic is truly a bug in the program meant to crash the runtime, -globally catching panics within Go applications is not supposed to be possible. -Despite this, it is often useful to have a way to know when panics occur. -panicwrap allows you to do something with these panics, such as writing them -to a file, so that you can track when panics occur. - -panicwrap is ***not a panic recovery system***. Panics indicate serious -problems with your application and _should_ crash the runtime. panicwrap -is just meant as a way to monitor for panics. If you still think this is -the worst idea ever, read the section below on why. - -## Features - -* **SIMPLE!** -* Works with all Go applications on all platforms Go supports -* Custom behavior when a panic occurs -* Stdout, stderr, stdin, exit codes, and signals continue to work as - expected. - -## Usage - -Using panicwrap is simple. It behaves a lot like `fork`, if you know -how that works. A basic example is shown below. - -Because it would be sad to panic while capturing a panic, it is recommended -that the handler functions for panicwrap remain relatively simple and well -tested. panicwrap itself contains many tests. - -```go -package main - -import ( - "fmt" - "github.com/mitchellh/panicwrap" - "os" -) - -func main() { - exitStatus, err := panicwrap.BasicWrap(panicHandler) - if err != nil { - // Something went wrong setting up the panic wrapper. Unlikely, - // but possible. - panic(err) - } - - // If exitStatus >= 0, then we're the parent process and the panicwrap - // re-executed ourselves and completed. Just exit with the proper status. - if exitStatus >= 0 { - os.Exit(exitStatus) - } - - // Otherwise, exitStatus < 0 means we're the child. Continue executing as - // normal... - - // Let's say we panic - panic("oh shucks") -} - -func panicHandler(output string) { - // output contains the full output (including stack traces) of the - // panic. Put it in a file or something. - fmt.Printf("The child panicked:\n\n%s\n", output) - os.Exit(1) -} -``` - -## How Does it Work? - -panicwrap works by re-executing the running program (retaining arguments, -environmental variables, etc.) and monitoring the stderr of the program. -Since Go always outputs panics in a predictable way with a predictable -exit code, panicwrap is able to reliably detect panics and allow the parent -process to handle them. - -## WHY?! Panics should CRASH! - -Yes, panics _should_ crash. They are 100% always indicative of bugs. 
- -## WHY?! Panics should CRASH! - -Yes, panics _should_ crash. They are 100% always indicative of bugs. -However, in some cases, such as user-facing programs -(programs like -[Packer](http://github.com/mitchellh/packer) or -[Docker](http://github.com/dotcloud/docker)), it is up to the user to -report such panics. This is unreliable, at best, and it would be better if the -program could have a way to automatically report panics. panicwrap provides -a way to do this. - -For backend applications, it is easier to detect crashes (since the application -exits). However, it is still nice sometimes to more intelligently log -panics in some way. For example, at [HashiCorp](http://www.hashicorp.com), -we use panicwrap to log panics to timestamped files with some additional -data (configuration settings at the time, environmental variables, etc.) - -The goal of panicwrap is _not_ to hide panics. It is instead to provide -a clean mechanism for handling them before bubbling them up to the user -and ultimately crashing. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor.go deleted file mode 100644 index 1c64a546..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build !windows - -package panicwrap - -import ( - "github.com/bugsnag/osext" - "os" - "os/exec" - "syscall" -) - -func monitor(c *WrapConfig) (int, error) { - - // If we're the child process, absorb panics. - if Wrapped(c) { - panicCh := make(chan string) - - go trackPanic(os.Stdin, os.Stderr, c.DetectDuration, panicCh) - - // Wait on the panic data - panicTxt := <-panicCh - if panicTxt != "" { - if !c.HidePanic { - os.Stderr.Write([]byte(panicTxt)) - } - - c.Handler(panicTxt) - } - - os.Exit(0) - } - - exePath, err := osext.Executable() - if err != nil { - return -1, err - } - cmd := exec.Command(exePath, os.Args[1:]...) - - read, write, err := os.Pipe() - if err != nil { - return -1, err - } - - cmd.Stdin = read - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue) - - if err != nil { - return -1, err - } - err = cmd.Start() - if err != nil { - return -1, err - } - - err = syscall.Dup2(int(write.Fd()), int(os.Stderr.Fd())) - if err != nil { - return -1, err - } - - return -1, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor_windows.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor_windows.go deleted file mode 100644 index d07a6921..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package panicwrap - -import "fmt" - -func monitor(c *WrapConfig) (int, error) { - return -1, fmt.Errorf("Monitor is not supported on windows") -}
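For orientation, the `monitor` implementations above are invoked through `BasicMonitor`, defined in panicwrap.go below. A minimal usage sketch (assuming the vendored import path; the logging handler and the demonstration panic are illustrative, not from this patch):

```go
package main

import (
	"log"

	"github.com/bugsnag/panicwrap"
)

func main() {
	// On supported platforms this forks a monitor process. In the monitor
	// process BasicMonitor never returns (it exits after watching stderr);
	// in the application process it returns nil and execution continues.
	err := panicwrap.BasicMonitor(func(output string) {
		log.Printf("panic detected:\n%s", output)
	})
	if err != nil {
		log.Fatalf("failed to set up panic monitoring: %v", err)
	}

	// Normal application code runs here; its panic output is picked up
	// by the monitor process and handed to the handler above.
	panic("demonstration panic")
}
```

Note that with `Monitor` set, the handler runs in the monitor process, not in the application process.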
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap.go deleted file mode 100644 index f9ea3e3e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap.go +++ /dev/null @@ -1,339 +0,0 @@ -// The panicwrap package provides functions for capturing and handling -// panics in your application. It does this by re-executing the running -// application and monitoring stderr for any panics. At the same time, -// stdout/stderr/etc. are set to the same values so that data is shuttled -// through properly, making the existence of panicwrap mostly transparent. -// -// Panics are only detected when the subprocess exits with a non-zero -// exit status, since this is the only time panics are real. Otherwise, -// "panic-like" output is ignored. -package panicwrap - -import ( - "bytes" - "errors" - "github.com/bugsnag/osext" - "io" - "os" - "os/exec" - "os/signal" - "runtime" - "syscall" - "time" -) - -const ( - DEFAULT_COOKIE_KEY = "cccf35992f8f3cd8d1d28f0109dd953e26664531" - DEFAULT_COOKIE_VAL = "7c28215aca87789f95b406b8dd91aa5198406750" -) - -// HandlerFunc is the type called when a panic is detected. -type HandlerFunc func(string) - -// WrapConfig is the configuration for panicwrap when wrapping an existing -// binary. To get started, in general, you only need the BasicWrap function -// that will set this up for you. However, for more customizability, -// WrapConfig and Wrap can be used. -type WrapConfig struct { - // Handler is the function called when a panic occurs. - Handler HandlerFunc - - // The cookie key and value are used within environmental variables - // to tell the child process that it is already executing so that - // wrap doesn't re-wrap itself. - CookieKey string - CookieValue string - - // If true, the panic will not be mirrored to the configured writer - // and will instead ONLY go to the handler. This lets you effectively - // hide panics from the end user. This is not recommended because if - // your handler fails, the panic is effectively lost. - HidePanic bool - - // If true, panicwrap will boot a monitor sub-process and let the parent - // run the app. This mode is useful for processes run under supervisors - // like runit as signals get sent to the correct codebase. This is not - // supported when GOOS=windows, and ignores c.Stderr and c.Stdout. - Monitor bool - - // The amount of time that a process must exit within after detecting - // a panic header for panicwrap to assume it is a panic. Defaults to - // 300 milliseconds. - DetectDuration time.Duration - - // The writer to send the stderr to. If this is nil, then it defaults - // to os.Stderr. - Writer io.Writer - - // The writer to send stdout to. If this is nil, then it defaults to - // os.Stdout. - Stdout io.Writer -} - -// BasicWrap calls Wrap with the given handler function, using defaults -// for everything else. See Wrap and WrapConfig for more information on -// functionality and return values. -func BasicWrap(f HandlerFunc) (int, error) { - return Wrap(&WrapConfig{ - Handler: f, - }) -} - -// BasicMonitor calls Wrap with Monitor set to true on supported platforms. -// It forks your program and runs it again from the start. In one process -// BasicMonitor never returns; it just listens on stderr of the other process, -// and calls your handler when a panic is seen. In the other it either returns -// nil to indicate that the panic monitoring is enabled, or an error to indicate -// that something else went wrong. -func BasicMonitor(f HandlerFunc) error { - exitStatus, err := Wrap(&WrapConfig{ - Handler: f, - Monitor: runtime.GOOS != "windows", - }) - - if err != nil { - return err - } - - if exitStatus >= 0 { - os.Exit(exitStatus) - } - - return nil -} - -// Wrap wraps the current executable in a handler to catch panics. It -// returns an error if there was an error during the wrapping process.
-// If the error is nil, then the int result indicates the exit status of the -// child process. If the exit status is -1, then this is the child process, -// and execution should continue as normal. Otherwise, this is the parent -// process and the child successfully ran already, and you should exit the -// process with the returned exit status. -// -// This function should be called very very early in your program's execution. -// Ideally, this runs as the first line of code of main. -// -// Once this is called, the given WrapConfig shouldn't be modified or used -// any further. -func Wrap(c *WrapConfig) (int, error) { - if c.Handler == nil { - return -1, errors.New("Handler must be set") - } - - if c.DetectDuration == 0 { - c.DetectDuration = 300 * time.Millisecond - } - - if c.Writer == nil { - c.Writer = os.Stderr - } - - if c.Monitor { - return monitor(c) - } else { - return wrap(c) - } -} - -func wrap(c *WrapConfig) (int, error) { - - // If we're already wrapped, exit out. - if Wrapped(c) { - return -1, nil - } - - // Get the path to our current executable - exePath, err := osext.Executable() - if err != nil { - return -1, err - } - - // Pipe the stderr so we can read all the data as we look for panics - stderr_r, stderr_w := io.Pipe() - - // doneCh is closed when we're done, signaling any other goroutines - // to end immediately. - doneCh := make(chan struct{}) - - // panicCh is the channel on which the panic text will actually be - // sent. - panicCh := make(chan string) - - // On close, make sure to finish off the copying of data to stderr - defer func() { - defer close(doneCh) - stderr_w.Close() - <-panicCh - }() - - // Start the goroutine that will watch stderr for any panics - go trackPanic(stderr_r, c.Writer, c.DetectDuration, panicCh) - - // Create the writer for stdout that we're going to use - var stdout_w io.Writer = os.Stdout - if c.Stdout != nil { - stdout_w = c.Stdout - } - - // Build a subcommand to re-execute ourselves. We make sure to - // set the environmental variable to include our cookie. We also - // set stdin/stdout to match the config. Finally, we pipe stderr - // through ourselves in order to watch for panics. - cmd := exec.Command(exePath, os.Args[1:]...) - cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue) - cmd.Stdin = os.Stdin - cmd.Stdout = stdout_w - cmd.Stderr = stderr_w - if err := cmd.Start(); err != nil { - return 1, err - } - - // Listen to signals and capture them forever. We allow the child - // process to handle them in some way. - sigCh := make(chan os.Signal) - signal.Notify(sigCh, os.Interrupt) - go func() { - defer signal.Stop(sigCh) - for { - select { - case <-doneCh: - return - case <-sigCh: - } - } - }() - - if err := cmd.Wait(); err != nil { - exitErr, ok := err.(*exec.ExitError) - if !ok { - // This is some other kind of subprocessing error. - return 1, err - } - - exitStatus := 1 - if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { - exitStatus = status.ExitStatus() - } - - // Close the writer end so that the tracker goroutine ends at some point - stderr_w.Close() - - // Wait on the panic data - panicTxt := <-panicCh - if panicTxt != "" { - if !c.HidePanic { - c.Writer.Write([]byte(panicTxt)) - } - - c.Handler(panicTxt) - } - - return exitStatus, nil - } - - return 0, nil -} - -// Wrapped checks if we're already wrapped according to the configuration -// given. -// -// Wrapped is very cheap and can be used early to short-circuit some pre-wrap -// logic your application may have. 
-func Wrapped(c *WrapConfig) bool { - if c.CookieKey == "" { - c.CookieKey = DEFAULT_COOKIE_KEY - } - - if c.CookieValue == "" { - c.CookieValue = DEFAULT_COOKIE_VAL - } - - // If the cookie key/value match our environment, then we are the - // child, so just exit now and tell the caller that we're the child - return os.Getenv(c.CookieKey) == c.CookieValue -} - -// trackPanic monitors the given reader for a panic. If a panic is detected, -// it is outputted on the result channel. This will close the channel once -// it is complete. -func trackPanic(r io.Reader, w io.Writer, dur time.Duration, result chan<- string) { - defer close(result) - - var panicTimer <-chan time.Time - panicBuf := new(bytes.Buffer) - panicHeader := []byte("panic:") - - tempBuf := make([]byte, 2048) - for { - var buf []byte - var n int - - if panicTimer == nil && panicBuf.Len() > 0 { - // We're not tracking a panic but the buffer length is - // greater than 0. We need to clear out that buffer, but - // look for another panic along the way. - - // First, remove the previous panic header so we don't loop - w.Write(panicBuf.Next(len(panicHeader))) - - // Next, assume that this is our new buffer to inspect - n = panicBuf.Len() - buf = make([]byte, n) - copy(buf, panicBuf.Bytes()) - panicBuf.Reset() - } else { - var err error - buf = tempBuf - n, err = r.Read(buf) - if n <= 0 && err == io.EOF { - if panicBuf.Len() > 0 { - // We were tracking a panic, assume it was a panic - // and return that as the result. - result <- panicBuf.String() - } - - return - } - } - - if panicTimer != nil { - // We're tracking what we think is a panic right now. - // If the timer ended, then it is not a panic. - isPanic := true - select { - case <-panicTimer: - isPanic = false - default: - } - - // No matter what, buffer the text some more. - panicBuf.Write(buf[0:n]) - - if !isPanic { - // It isn't a panic, stop tracking. Clean-up will happen - // on the next iteration. - panicTimer = nil - } - - continue - } - - flushIdx := n - idx := bytes.Index(buf[0:n], panicHeader) - if idx >= 0 { - flushIdx = idx - } - - // Flush to stderr what isn't a panic - w.Write(buf[0:flushIdx]) - - if idx < 0 { - // Not a panic so just continue along - continue - } - - // We have a panic header. Write what we assume is a panic so far. - panicBuf.Write(buf[idx:n]) - panicTimer = time.After(dur) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap_test.go deleted file mode 100644 index dd1d77f2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap_test.go +++ /dev/null @@ -1,360 +0,0 @@ -package panicwrap - -import ( - "bytes" - "fmt" - "os" - "os/exec" - "strings" - "testing" - "time" -) - -func helperProcess(s ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcess", "--"} - cs = append(cs, s...) - env := []string{ - "GO_WANT_HELPER_PROCESS=1", - } - - cmd := exec.Command(os.Args[0], cs...) - cmd.Env = append(env, os.Environ()...) - cmd.Stdin = os.Stdin - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - return cmd -} - -// This is executed by `helperProcess` in a separate process in order to -provide a proper sub-process environment to test some of our functionality.
-func TestHelperProcess(*testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return - } - - // Find the arguments to our helper, which are the arguments past - // the "--" in the command line. - args := os.Args - for len(args) > 0 { - if args[0] == "--" { - args = args[1:] - break - } - - args = args[1:] - } - - if len(args) == 0 { - fmt.Fprintf(os.Stderr, "No command\n") - os.Exit(2) - } - - panicHandler := func(s string) { - fmt.Fprintf(os.Stdout, "wrapped: %d", len(s)) - os.Exit(0) - } - - cmd, args := args[0], args[1:] - switch cmd { - case "no-panic-ordered-output": - exitStatus, err := BasicWrap(panicHandler) - if err != nil { - fmt.Fprintf(os.Stderr, "wrap error: %s", err) - os.Exit(1) - } - - if exitStatus < 0 { - for i := 0; i < 1000; i++ { - os.Stdout.Write([]byte("a")) - os.Stderr.Write([]byte("b")) - } - os.Exit(0) - } - - os.Exit(exitStatus) - case "no-panic-output": - fmt.Fprint(os.Stdout, "i am output") - fmt.Fprint(os.Stderr, "stderr out") - os.Exit(0) - case "panic-boundary": - exitStatus, err := BasicWrap(panicHandler) - - if err != nil { - fmt.Fprintf(os.Stderr, "wrap error: %s", err) - os.Exit(1) - } - - if exitStatus < 0 { - // Simulate a panic but on two boundaries... - fmt.Fprint(os.Stderr, "pan") - os.Stderr.Sync() - time.Sleep(100 * time.Millisecond) - fmt.Fprint(os.Stderr, "ic: oh crap") - os.Exit(2) - } - - os.Exit(exitStatus) - case "panic-long": - exitStatus, err := BasicWrap(panicHandler) - - if err != nil { - fmt.Fprintf(os.Stderr, "wrap error: %s", err) - os.Exit(1) - } - - if exitStatus < 0 { - // Make a fake panic by faking the header and adding a - // bunch of garbage. - fmt.Fprint(os.Stderr, "panic: foo\n\n") - for i := 0; i < 1024; i++ { - fmt.Fprint(os.Stderr, "foobarbaz") - } - - // Sleep so that it dumps the previous data - //time.Sleep(1 * time.Millisecond) - time.Sleep(500 * time.Millisecond) - - // Make a real panic - panic("I AM REAL!") - } - - os.Exit(exitStatus) - case "panic": - hidePanic := false - if args[0] == "hide" { - hidePanic = true - } - - config := &WrapConfig{ - Handler: panicHandler, - HidePanic: hidePanic, - } - - exitStatus, err := Wrap(config) - - if err != nil { - fmt.Fprintf(os.Stderr, "wrap error: %s", err) - os.Exit(1) - } - - if exitStatus < 0 { - panic("uh oh") - } - - os.Exit(exitStatus) - case "wrapped": - child := false - if len(args) > 0 && args[0] == "child" { - child = true - } - config := &WrapConfig{ - Handler: panicHandler, - } - - exitStatus, err := Wrap(config) - if err != nil { - fmt.Fprintf(os.Stderr, "wrap error: %s", err) - os.Exit(1) - } - - if exitStatus < 0 { - if child { - fmt.Printf("%v", Wrapped(config)) - } - os.Exit(0) - } - - if !child { - fmt.Printf("%v", Wrapped(config)) - } - os.Exit(exitStatus) - case "panic-monitor": - - config := &WrapConfig{ - Handler: panicHandler, - HidePanic: true, - Monitor: true, - } - - exitStatus, err := Wrap(config) - - if err != nil { - fmt.Fprintf(os.Stderr, "wrap error: %s", err) - os.Exit(1) - } - - if exitStatus != -1 { - fmt.Fprintf(os.Stderr, "wrap error: %s", err) - os.Exit(1) - } - - panic("uh oh") - - default: - fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) - os.Exit(2) - } -} - -func TestPanicWrap_Output(t *testing.T) { - stderr := new(bytes.Buffer) - stdout := new(bytes.Buffer) - - p := helperProcess("no-panic-output") - p.Stdout = stdout - p.Stderr = stderr - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "i am output") { - t.Fatalf("didn't forward: %#v", stdout.String()) - } - 
- if !strings.Contains(stderr.String(), "stderr out") { - t.Fatalf("didn't forward: %#v", stderr.String()) - } -} - -/* -TODO(mitchellh): This property would be nice to gain. -func TestPanicWrap_Output_Order(t *testing.T) { - output := new(bytes.Buffer) - - p := helperProcess("no-panic-ordered-output") - p.Stdout = output - p.Stderr = output - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - expectedBuf := new(bytes.Buffer) - for i := 0; i < 1000; i++ { - expectedBuf.WriteString("ab") - } - - actual := strings.TrimSpace(output.String()) - expected := strings.TrimSpace(expectedBuf.String()) - - if actual != expected { - t.Fatalf("bad: %#v", actual) - } -} -*/ - -func TestPanicWrap_panicHide(t *testing.T) { - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - - p := helperProcess("panic", "hide") - p.Stdout = stdout - p.Stderr = stderr - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "wrapped:") { - t.Fatalf("didn't wrap: %#v", stdout.String()) - } - - if strings.Contains(stderr.String(), "panic:") { - t.Fatalf("shouldn't have panic: %#v", stderr.String()) - } -} - -func TestPanicWrap_panicShow(t *testing.T) { - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - - p := helperProcess("panic", "show") - p.Stdout = stdout - p.Stderr = stderr - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "wrapped:") { - t.Fatalf("didn't wrap: %#v", stdout.String()) - } - - if !strings.Contains(stderr.String(), "panic:") { - t.Fatalf("should have panic: %#v", stderr.String()) - } -} - -func TestPanicWrap_panicLong(t *testing.T) { - stdout := new(bytes.Buffer) - - p := helperProcess("panic-long") - p.Stdout = stdout - p.Stderr = new(bytes.Buffer) - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "wrapped:") { - t.Fatalf("didn't wrap: %#v", stdout.String()) - } -} - -func TestPanicWrap_panicBoundary(t *testing.T) { - // TODO(mitchellh): panics are currently lost on boundaries - t.SkipNow() - - stdout := new(bytes.Buffer) - - p := helperProcess("panic-boundary") - p.Stdout = stdout - //p.Stderr = new(bytes.Buffer) - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "wrapped: 1015") { - t.Fatalf("didn't wrap: %#v", stdout.String()) - } -} - -func TestPanicWrap_monitor(t *testing.T) { - - stdout := new(bytes.Buffer) - - p := helperProcess("panic-monitor") - p.Stdout = stdout - //p.Stderr = new(bytes.Buffer) - if err := p.Run(); err == nil || err.Error() != "exit status 2" { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "wrapped:") { - t.Fatalf("didn't wrap: %#v", stdout.String()) - } -} - -func TestWrapped(t *testing.T) { - stdout := new(bytes.Buffer) - - p := helperProcess("wrapped", "child") - p.Stdout = stdout - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "true") { - t.Fatalf("bad: %#v", stdout.String()) - } -} - -func TestWrapped_parent(t *testing.T) { - stdout := new(bytes.Buffer) - - p := helperProcess("wrapped") - p.Stdout = stdout - if err := p.Run(); err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(stdout.String(), "false") { - t.Fatalf("bad: %#v", stdout.String()) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml deleted file mode 100644 index baf46abc..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go -go: 1.1 - -script: -- go vet ./... -- go test -v ./... diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE deleted file mode 100644 index 5515ccfb..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (C) 2013 Jeremy Saenz -All Rights Reserved. - -MIT LICENSE - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/README.md deleted file mode 100644 index c0bb338a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/README.md +++ /dev/null @@ -1,298 +0,0 @@ -[![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli) - -# cli.go -cli.go is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. - -You can view the API docs here: -http://godoc.org/github.com/codegangsta/cli - -## Overview -Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app. - -**This is where cli.go comes into play.** cli.go makes command line programming fun, organized, and expressive! - -## Installation -Make sure you have a working Go environment (go 1.1 is *required*). [See the install instructions](http://golang.org/doc/install.html). - -To install `cli.go`, simply run: -``` -$ go get github.com/codegangsta/cli -``` - -Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can be easily used: -``` -export PATH=$PATH:$GOPATH/bin -``` - -## Getting Started -One of the philosophies behind cli.go is that an API should be playful and full of discovery.
So a cli.go app can be as little as one line of code in `main()`. - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - cli.NewApp().Run(os.Args) -} -``` - -This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation: - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Action = func(c *cli.Context) { - println("boom! I say!") - } - - app.Run(os.Args) -} -``` - -Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below. - -## Example - -Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness! - -Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "greet" - app.Usage = "fight the loneliness!" - app.Action = func(c *cli.Context) { - println("Hello friend!") - } - - app.Run(os.Args) -} -``` - -Install our command to the `$GOPATH/bin` directory: - -``` -$ go install -``` - -Finally run our new command: - -``` -$ greet -Hello friend! -``` - -cli.go also generates some bitchass help text: -``` -$ greet help -NAME: - greet - fight the loneliness! - -USAGE: - greet [global options] command [command options] [arguments...] - -VERSION: - 0.0.0 - -COMMANDS: - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS - --version Shows version information -``` - -### Arguments -You can lookup arguments by calling the `Args` function on `cli.Context`. - -``` go -... -app.Action = func(c *cli.Context) { - println("Hello", c.Args()[0]) -} -... -``` - -### Flags -Setting and querying flags is simple. -``` go -... -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, -} -app.Action = func(c *cli.Context) { - name := "someone" - if len(c.Args()) > 0 { - name = c.Args()[0] - } - if c.String("lang") == "spanish" { - println("Hola", name) - } else { - println("Hello", name) - } -} -... -``` - -#### Alternate Names - -You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - }, -} -``` - -That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. - -#### Values from the Environment - -You can also have the default value set from the environment via `EnvVar`. e.g. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "APP_LANG", - }, -} -``` - -The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG", - }, -} -``` - -### Subcommands - -Subcommands can be defined for a more git-like command line app. -```go -... 
-app.Commands = []cli.Command{ - { - Name: "add", - ShortName: "a", - Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - ShortName: "c", - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - }, - { - Name: "template", - ShortName: "r", - Usage: "options for task templates", - Subcommands: []cli.Command{ - { - Name: "add", - Usage: "add a new template", - Action: func(c *cli.Context) { - println("new task template: ", c.Args().First()) - }, - }, - { - Name: "remove", - Usage: "remove an existing template", - Action: func(c *cli.Context) { - println("removed task template: ", c.Args().First()) - }, - }, - }, - }, -} -... -``` - -### Bash Completion - -You can enable completion commands by setting the `EnableBashCompletion` -flag on the `App` object. By default, this setting will only auto-complete to -show an app's subcommands, but you can write your own completion methods for -the App or its subcommands. -```go -... -var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"} -app := cli.NewApp() -app.EnableBashCompletion = true -app.Commands = []cli.Command{ - { - Name: "complete", - ShortName: "c", - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - BashComplete: func(c *cli.Context) { - // This will complete if no args are passed - if len(c.Args()) > 0 { - return - } - for _, t := range tasks { - fmt.Println(t) - } - }, - } -} -... -``` - -#### To Enable - -Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while -setting the `PROG` variable to the name of your program: - -`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` - - -## Contribution Guidelines -Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch. - -If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others' pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together. - -If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app.go deleted file mode 100644 index 6422345d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app.go +++ /dev/null @@ -1,275 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "text/tabwriter" - "text/template" - "time" -) - -// App is the main structure of a cli application. It is recommended that -an app be created with the cli.NewApp() function -type App struct { - // The name of the program.
Defaults to os.Args[0] - Name string - // Description of the program. - Usage string - // Version of the program - Version string - // List of commands to execute - Commands []Command - // List of flags to parse - Flags []Flag - // Boolean to enable bash completion commands - EnableBashCompletion bool - // Boolean to hide built-in help command - HideHelp bool - // Boolean to hide built-in version flag - HideVersion bool - // An action to execute when the bash-completion flag is set - BashComplete func(context *Context) - // An action to execute before any subcommands are run, but after the context is ready - // If a non-nil error is returned, no subcommands are run - Before func(context *Context) error - // The action to execute when no subcommands are specified - Action func(context *Context) - // Execute this function if the proper command cannot be found - CommandNotFound func(context *Context, command string) - // Compilation date - Compiled time.Time - // Author - Author string - // Author e-mail - Email string - // Writer writer to write output to - Writer io.Writer -} - -// Tries to find out when this binary was compiled. -// Returns the current time if it fails to find it. -func compileTime() time.Time { - info, err := os.Stat(os.Args[0]) - if err != nil { - return time.Now() - } - return info.ModTime() -} - -// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. -func NewApp() *App { - return &App{ - Name: os.Args[0], - Usage: "A new cli application", - Version: "0.0.0", - BashComplete: DefaultAppComplete, - Action: helpCommand.Action, - Compiled: compileTime(), - Author: "Author", - Email: "unknown@email", - Writer: os.Stdout, - } -} - -// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination -func (a *App) Run(arguments []string) error { - if HelpPrinter == nil { - defer func() { - HelpPrinter = nil - }() - - HelpPrinter = func(templ string, data interface{}) { - w := tabwriter.NewWriter(a.Writer, 0, 8, 1, '\t', 0) - t := template.Must(template.New("help").Parse(templ)) - err := t.Execute(w, data) - if err != nil { - panic(err) - } - w.Flush() - } - } - - // append help to commands - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - - //append version/help flags - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) - } - - if !a.HideVersion { - a.appendFlag(VersionFlag) - } - - // parse flags - set := flagSet(a.Name, a.Flags) - set.SetOutput(ioutil.Discard) - err := set.Parse(arguments[1:]) - nerr := normalizeFlags(a.Flags, set) - if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - context := NewContext(a, set, set) - ShowAppHelp(context) - fmt.Fprintln(a.Writer) - return nerr - } - context := NewContext(a, set, set) - - if err != nil { - fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n") - ShowAppHelp(context) - fmt.Fprintln(a.Writer) - return err - } - - if checkCompletions(context) { - return nil - } - - if checkHelp(context) { - return nil - } - - if checkVersion(context) { - return nil - } - - if a.Before != nil { - err := a.Before(context) - if err != nil { - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - // Run default Action - a.Action(context) - return nil -} - -// Another entry point to the cli app, takes care of passing arguments and error 
handling -func (a *App) RunAndExitOnError() { - if err := a.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags -func (a *App) RunAsSubcommand(ctx *Context) error { - // append help to commands - if len(a.Commands) > 0 { - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - } - - // append flags - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) - } - - // parse flags - set := flagSet(a.Name, a.Flags) - set.SetOutput(ioutil.Discard) - err := set.Parse(ctx.Args().Tail()) - nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, ctx.globalSet) - - if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - if len(a.Commands) > 0 { - ShowSubcommandHelp(context) - } else { - ShowCommandHelp(ctx, context.Args().First()) - } - fmt.Fprintln(a.Writer) - return nerr - } - - if err != nil { - fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n") - ShowSubcommandHelp(context) - return err - } - - if checkCompletions(context) { - return nil - } - - if len(a.Commands) > 0 { - if checkSubcommandHelp(context) { - return nil - } - } else { - if checkCommandHelp(ctx, context.Args().First()) { - return nil - } - } - - if a.Before != nil { - err := a.Before(context) - if err != nil { - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - // Run default Action - a.Action(context) - - return nil -} - -// Returns the named command on App. Returns nil if the command does not exist -func (a *App) Command(name string) *Command { - for _, c := range a.Commands { - if c.HasName(name) { - return &c - } - } - - return nil -} - -func (a *App) hasFlag(flag Flag) bool { - for _, f := range a.Flags { - if flag == f { - return true - } - } - - return false -} - -func (a *App) appendFlag(flag Flag) { - if !a.hasFlag(flag) { - a.Flags = append(a.Flags, flag) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go deleted file mode 100644 index 2cbb0e3a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go +++ /dev/null @@ -1,554 +0,0 @@ -package cli_test - -import ( - "flag" - "fmt" - "os" - "testing" - - "github.com/codegangsta/cli" -) - -func ExampleApp() { - // set args for examples sake - os.Args = []string{"greet", "--name", "Jeremy"} - - app := cli.NewApp() - app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Action = func(c *cli.Context) { - fmt.Printf("Hello %v\n", c.String("name")) - } - app.Run(os.Args) - // Output: - // Hello Jeremy -} - -func ExampleAppSubcommand() { - // set args for examples sake - os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} - app := cli.NewApp() - app.Name = "say" - app.Commands = []cli.Command{ - { - Name: "hello", - ShortName: "hi", - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ - { - Name: "english", - ShortName: "en", - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []cli.Flag{ - 
cli.StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *cli.Context) { - fmt.Println("Hello,", c.String("name")) - }, - }, - }, - }, - } - - app.Run(os.Args) - // Output: - // Hello, Jeremy -} - -func ExampleAppHelp() { - // set args for examples sake - os.Args = []string{"greet", "h", "describeit"} - - app := cli.NewApp() - app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Commands = []cli.Command{ - { - Name: "describeit", - ShortName: "d", - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { - fmt.Printf("i like to describe things") - }, - }, - } - app.Run(os.Args) - // Output: - // NAME: - // describeit - use it to see a description - // - // USAGE: - // command describeit [arguments...] - // - // DESCRIPTION: - // This is how we describe describeit the function -} - -func ExampleAppBashComplete() { - // set args for examples sake - os.Args = []string{"greet", "--generate-bash-completion"} - - app := cli.NewApp() - app.Name = "greet" - app.EnableBashCompletion = true - app.Commands = []cli.Command{ - { - Name: "describeit", - ShortName: "d", - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { - fmt.Printf("i like to describe things") - }, - }, { - Name: "next", - Usage: "next example", - Description: "more stuff to see when generating bash completion", - Action: func(c *cli.Context) { - fmt.Printf("the next example") - }, - }, - } - - app.Run(os.Args) - // Output: - // describeit - // d - // next - // help - // h -} - -func TestApp_Run(t *testing.T) { - s := "" - - app := cli.NewApp() - app.Action = func(c *cli.Context) { - s = s + c.Args().First() - } - - err := app.Run([]string{"command", "foo"}) - expect(t, err, nil) - err = app.Run([]string{"command", "bar"}) - expect(t, err, nil) - expect(t, s, "foobar") -} - -var commandAppTests = []struct { - name string - expected bool -}{ - {"foobar", true}, - {"batbaz", true}, - {"b", true}, - {"f", true}, - {"bat", false}, - {"nothing", false}, -} - -func TestApp_Command(t *testing.T) { - app := cli.NewApp() - fooCommand := cli.Command{Name: "foobar", ShortName: "f"} - batCommand := cli.Command{Name: "batbaz", ShortName: "b"} - app.Commands = []cli.Command{ - fooCommand, - batCommand, - } - - for _, test := range commandAppTests { - expect(t, app.Command(test.name) != nil, test.expected) - } -} - -func TestApp_CommandWithArgBeforeFlags(t *testing.T) { - var parsedOption, firstArg string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *cli.Context) { - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) - - expect(t, parsedOption, "my-option") - expect(t, firstArg, "my-arg") -} - -func TestApp_RunAsSubcommandParseFlags(t *testing.T) { - var context *cli.Context - - a := cli.NewApp() - a.Commands = []cli.Command{ - { - Name: "foo", - Action: func(c *cli.Context) { - context = c - }, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, - }, - Before: func(_ *cli.Context) error { return nil }, - }, - } - a.Run([]string{"", 
"foo", "--lang", "spanish", "abcd"}) - - expect(t, context.Args().Get(0), "abcd") - expect(t, context.String("lang"), "spanish") -} - -func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) { - var parsedOption string - var args []string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *cli.Context) { - parsedOption = c.String("option") - args = c.Args() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"}) - - expect(t, parsedOption, "my-option") - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "--notARealFlag") -} - -func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) { - var args []string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Action: func(c *cli.Context) { - args = c.Args() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"}) - - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "notAFlagAtAll") -} - -func TestApp_Float64Flag(t *testing.T) { - var meters float64 - - app := cli.NewApp() - app.Flags = []cli.Flag{ - cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, - } - app.Action = func(c *cli.Context) { - meters = c.Float64("height") - } - - app.Run([]string{"", "--height", "1.93"}) - expect(t, meters, 1.93) -} - -func TestApp_ParseSliceFlags(t *testing.T) { - var parsedOption, firstArg string - var parsedIntSlice []int - var parsedStringSlice []string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"}, - cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"}, - }, - Action: func(c *cli.Context) { - parsedIntSlice = c.IntSlice("p") - parsedStringSlice = c.StringSlice("ip") - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"}) - - IntsEquals := func(a, b []int) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - - StrsEquals := func(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - var expectedIntSlice = []int{22, 80} - var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"} - - if !IntsEquals(parsedIntSlice, expectedIntSlice) { - t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice) - } - - if !StrsEquals(parsedStringSlice, expectedStringSlice) { - t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice) - } -} - -func TestApp_DefaultStdout(t *testing.T) { - app := cli.NewApp() - - if app.Writer != os.Stdout { - t.Error("Default output writer not set.") - } -} - -type mockWriter struct { - written []byte -} - -func (fw *mockWriter) Write(p []byte) (n int, err error) { - if fw.written == nil { - fw.written = p - } else { - fw.written = append(fw.written, p...) 
- } - - return len(p), nil -} - -func (fw *mockWriter) GetWritten() (b []byte) { - return fw.written -} - -func TestApp_SetStdout(t *testing.T) { - w := &mockWriter{} - - app := cli.NewApp() - app.Name = "test" - app.Writer = w - - err := app.Run([]string{"help"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if len(w.written) == 0 { - t.Error("App did not write output to desired writer.") - } -} - -func TestApp_BeforeFunc(t *testing.T) { - beforeRun, subcommandRun := false, false - beforeError := fmt.Errorf("fail") - var err error - - app := cli.NewApp() - - app.Before = func(c *cli.Context) error { - beforeRun = true - s := c.String("opt") - if s == "fail" { - return beforeError - } - - return nil - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "sub", - Action: func(c *cli.Context) { - subcommandRun = true - }, - }, - } - - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "opt"}, - } - - // run with the Before() func succeeding - err = app.Run([]string{"command", "--opt", "succeed", "sub"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } - - // reset - beforeRun, subcommandRun = false, false - - // run with the Before() func failing - err = app.Run([]string{"command", "--opt", "fail", "sub"}) - - // should be the same error produced by the Before func - if err != beforeError { - t.Errorf("Run error expected, but not received") - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == true { - t.Errorf("Subcommand executed when NOT expected") - } - -} - -func TestAppNoHelpFlag(t *testing.T) { - oldFlag := cli.HelpFlag - defer func() { - cli.HelpFlag = oldFlag - }() - - cli.HelpFlag = cli.BoolFlag{} - - app := cli.NewApp() - err := app.Run([]string{"test", "-h"}) - - if err != flag.ErrHelp { - t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err) - } -} - -func TestAppHelpPrinter(t *testing.T) { - oldPrinter := cli.HelpPrinter - defer func() { - cli.HelpPrinter = oldPrinter - }() - - var wasCalled = false - cli.HelpPrinter = func(template string, data interface{}) { - wasCalled = true - } - - app := cli.NewApp() - app.Run([]string{"-h"}) - - if wasCalled == false { - t.Errorf("Help printer expected to be called, but was not") - } -} - -func TestAppVersionPrinter(t *testing.T) { - oldPrinter := cli.VersionPrinter - defer func() { - cli.VersionPrinter = oldPrinter - }() - - var wasCalled = false - cli.VersionPrinter = func(c *cli.Context) { - wasCalled = true - } - - app := cli.NewApp() - ctx := cli.NewContext(app, nil, nil) - cli.ShowVersion(ctx) - - if wasCalled == false { - t.Errorf("Version printer expected to be called, but was not") - } -} - -func TestAppCommandNotFound(t *testing.T) { - beforeRun, subcommandRun := false, false - app := cli.NewApp() - - app.CommandNotFound = func(c *cli.Context, command string) { - beforeRun = true - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "bar", - Action: func(c *cli.Context) { - subcommandRun = true - }, - }, - } - - app.Run([]string{"command", "foo"}) - - expect(t, beforeRun, true) - expect(t, subcommandRun, false) -} - -func TestGlobalFlagsInSubcommands(t *testing.T) { - subcommandRun := false - app := cli.NewApp() - - app.Flags = []cli.Flag{ - cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, - } - - app.Commands = []cli.Command{ - 
cli.Command{ - Name: "foo", - Subcommands: []cli.Command{ - { - Name: "bar", - Action: func(c *cli.Context) { - if c.GlobalBool("debug") { - subcommandRun = true - } - }, - }, - }, - }, - } - - app.Run([]string{"command", "-d", "foo", "bar"}) - - expect(t, subcommandRun, true) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete deleted file mode 100644 index 9b55dd99..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete +++ /dev/null @@ -1,13 +0,0 @@ -#! /bin/bash - -_cli_bash_autocomplete() { - local cur prev opts base - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD-1]}" - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) - return 0 - } - - complete -F _cli_bash_autocomplete $PROG \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete deleted file mode 100644 index 5430a18f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete +++ /dev/null @@ -1,5 +0,0 @@ -autoload -U compinit && compinit -autoload -U bashcompinit && bashcompinit - -script_dir=$(dirname $0) -source ${script_dir}/bash_autocomplete diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go deleted file mode 100644 index b7425458..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package cli provides a minimal framework for creating and organizing command line -// Go applications. 
cli is designed to be easy to understand and write, the most simple -// cli application can be written as follows: -// func main() { -// cli.NewApp().Run(os.Args) -// } -// -// Of course this application does not do much, so let's make this an actual application: -// func main() { -// app := cli.NewApp() -// app.Name = "greet" -// app.Usage = "say a greeting" -// app.Action = func(c *cli.Context) { -// println("Greetings") -// } -// -// app.Run(os.Args) -// } -package cli diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go deleted file mode 100644 index 879a793d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package cli_test - -import ( - "os" - - "github.com/codegangsta/cli" -) - -func Example() { - app := cli.NewApp() - app.Name = "todo" - app.Usage = "task list on the command line" - app.Commands = []cli.Command{ - { - Name: "add", - ShortName: "a", - Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - ShortName: "c", - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - }, - } - - app.Run(os.Args) -} - -func ExampleSubcommand() { - app := cli.NewApp() - app.Name = "say" - app.Commands = []cli.Command{ - { - Name: "hello", - ShortName: "hi", - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ - { - Name: "english", - ShortName: "en", - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hello, ", c.String("name")) - }, - }, { - Name: "spanish", - ShortName: "sp", - Usage: "sends a greeting in spanish", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "surname", - Value: "Jones", - Usage: "Surname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hola, ", c.String("surname")) - }, - }, { - Name: "french", - ShortName: "fr", - Usage: "sends a greeting in french", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "nickname", - Value: "Stevie", - Usage: "Nickname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Bonjour, ", c.String("nickname")) - }, - }, - }, - }, { - Name: "bye", - Usage: "says goodbye", - Action: func(c *cli.Context) { - println("bye") - }, - }, - } - - app.Run(os.Args) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command.go deleted file mode 100644 index ffd3ef81..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command.go +++ /dev/null @@ -1,156 +0,0 @@ -package cli - -import ( - "fmt" - "io/ioutil" - "strings" -) - -// Command is a subcommand for a cli.App. -type Command struct { - // The name of the command - Name string - // short name of the command. 
Typically one character - ShortName string - // A short description of the usage of this command - Usage string - // A longer explanation of how the command works - Description string - // The function to call when checking for bash command completions - BashComplete func(context *Context) - // An action to execute before any sub-subcommands are run, but after the context is ready - // If a non-nil error is returned, no sub-subcommands are run - Before func(context *Context) error - // The function to call when this command is invoked - Action func(context *Context) - // List of child commands - Subcommands []Command - // List of flags to parse - Flags []Flag - // Treat all flags as normal arguments if true - SkipFlagParsing bool - // Boolean to hide built-in help command - HideHelp bool -} - -// Invokes the command given the context, parses ctx.Args() to generate command-specific flags -func (c Command) Run(ctx *Context) error { - - if len(c.Subcommands) > 0 || c.Before != nil { - return c.startApp(ctx) - } - - if !c.HideHelp && (HelpFlag != BoolFlag{}) { - // append help to flags - c.Flags = append( - c.Flags, - HelpFlag, - ) - } - - if ctx.App.EnableBashCompletion { - c.Flags = append(c.Flags, BashCompletionFlag) - } - - set := flagSet(c.Name, c.Flags) - set.SetOutput(ioutil.Discard) - - firstFlagIndex := -1 - terminatorIndex := -1 - for index, arg := range ctx.Args() { - if arg == "--" { - terminatorIndex = index - break - } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { - firstFlagIndex = index - } - } - - var err error - if firstFlagIndex > -1 && !c.SkipFlagParsing { - args := ctx.Args() - regularArgs := make([]string, len(args[1:firstFlagIndex])) - copy(regularArgs, args[1:firstFlagIndex]) - - var flagArgs []string - if terminatorIndex > -1 { - flagArgs = args[firstFlagIndex:terminatorIndex] - regularArgs = append(regularArgs, args[terminatorIndex:]...) 
- } else { - flagArgs = args[firstFlagIndex:] - } - - err = set.Parse(append(flagArgs, regularArgs...)) - } else { - err = set.Parse(ctx.Args().Tail()) - } - - if err != nil { - fmt.Fprint(ctx.App.Writer, "Incorrect Usage.\n\n") - ShowCommandHelp(ctx, c.Name) - fmt.Fprintln(ctx.App.Writer) - return err - } - - nerr := normalizeFlags(c.Flags, set) - if nerr != nil { - fmt.Fprintln(ctx.App.Writer, nerr) - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - fmt.Fprintln(ctx.App.Writer) - return nerr - } - context := NewContext(ctx.App, set, ctx.globalSet) - - if checkCommandCompletions(context, c.Name) { - return nil - } - - if checkCommandHelp(context, c.Name) { - return nil - } - context.Command = c - c.Action(context) - return nil -} - -// Returns true if Command.Name or Command.ShortName matches given name -func (c Command) HasName(name string) bool { - return c.Name == name || c.ShortName == name -} - -func (c Command) startApp(ctx *Context) error { - app := NewApp() - - // set the name and usage - app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) - if c.Description != "" { - app.Usage = c.Description - } else { - app.Usage = c.Usage - } - - // set CommandNotFound - app.CommandNotFound = ctx.App.CommandNotFound - - // set the flags and commands - app.Commands = c.Subcommands - app.Flags = c.Flags - app.HideHelp = c.HideHelp - - // bash completion - app.EnableBashCompletion = ctx.App.EnableBashCompletion - if c.BashComplete != nil { - app.BashComplete = c.BashComplete - } - - // set the actions - app.Before = c.Before - if c.Action != nil { - app.Action = c.Action - } else { - app.Action = helpSubcommand.Action - } - - return app.RunAsSubcommand(ctx) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go deleted file mode 100644 index c0f556ad..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package cli_test - -import ( - "flag" - "testing" - - "github.com/codegangsta/cli" -) - -func TestCommandDoNotIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah", "-break"} - set.Parse(test) - - c := cli.NewContext(app, set, set) - - command := cli.Command{ - Name: "test-cmd", - ShortName: "tc", - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) {}, - } - err := command.Run(c) - - expect(t, err.Error(), "flag provided but not defined: -break") -} - -func TestCommandIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah"} - set.Parse(test) - - c := cli.NewContext(app, set, set) - - command := cli.Command{ - Name: "test-cmd", - ShortName: "tc", - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) {}, - SkipFlagParsing: true, - } - err := command.Run(c) - - expect(t, err, nil) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context.go deleted file mode 100644 index c9f645b1..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context.go +++ /dev/null @@ -1,339 +0,0 @@ -package cli - -import ( - "errors" - "flag" - 
"strconv" - "strings" - "time" -) - -// Context is a type that is passed through to -// each Handler action in a cli application. Context -// can be used to retrieve context-specific Args and -// parsed command-line options. -type Context struct { - App *App - Command Command - flagSet *flag.FlagSet - globalSet *flag.FlagSet - setFlags map[string]bool - globalSetFlags map[string]bool -} - -// Creates a new context. For use in when invoking an App or Command action. -func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context { - return &Context{App: app, flagSet: set, globalSet: globalSet} -} - -// Looks up the value of a local int flag, returns 0 if no int flag exists -func (c *Context) Int(name string) int { - return lookupInt(name, c.flagSet) -} - -// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, c.flagSet) -} - -// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists -func (c *Context) Float64(name string) float64 { - return lookupFloat64(name, c.flagSet) -} - -// Looks up the value of a local bool flag, returns false if no bool flag exists -func (c *Context) Bool(name string) bool { - return lookupBool(name, c.flagSet) -} - -// Looks up the value of a local boolT flag, returns false if no bool flag exists -func (c *Context) BoolT(name string) bool { - return lookupBoolT(name, c.flagSet) -} - -// Looks up the value of a local string flag, returns "" if no string flag exists -func (c *Context) String(name string) string { - return lookupString(name, c.flagSet) -} - -// Looks up the value of a local string slice flag, returns nil if no string slice flag exists -func (c *Context) StringSlice(name string) []string { - return lookupStringSlice(name, c.flagSet) -} - -// Looks up the value of a local int slice flag, returns nil if no int slice flag exists -func (c *Context) IntSlice(name string) []int { - return lookupIntSlice(name, c.flagSet) -} - -// Looks up the value of a local generic flag, returns nil if no generic flag exists -func (c *Context) Generic(name string) interface{} { - return lookupGeneric(name, c.flagSet) -} - -// Looks up the value of a global int flag, returns 0 if no int flag exists -func (c *Context) GlobalInt(name string) int { - return lookupInt(name, c.globalSet) -} - -// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) GlobalDuration(name string) time.Duration { - return lookupDuration(name, c.globalSet) -} - -// Looks up the value of a global bool flag, returns false if no bool flag exists -func (c *Context) GlobalBool(name string) bool { - return lookupBool(name, c.globalSet) -} - -// Looks up the value of a global string flag, returns "" if no string flag exists -func (c *Context) GlobalString(name string) string { - return lookupString(name, c.globalSet) -} - -// Looks up the value of a global string slice flag, returns nil if no string slice flag exists -func (c *Context) GlobalStringSlice(name string) []string { - return lookupStringSlice(name, c.globalSet) -} - -// Looks up the value of a global int slice flag, returns nil if no int slice flag exists -func (c *Context) GlobalIntSlice(name string) []int { - return lookupIntSlice(name, c.globalSet) -} - -// Looks up the value of a global generic flag, returns nil if no generic flag exists -func (c *Context) GlobalGeneric(name string) interface{} { - return 
lookupGeneric(name, c.globalSet) -} - -// Determines if the flag was actually set -func (c *Context) IsSet(name string) bool { - if c.setFlags == nil { - c.setFlags = make(map[string]bool) - c.flagSet.Visit(func(f *flag.Flag) { - c.setFlags[f.Name] = true - }) - } - return c.setFlags[name] == true -} - -// Determines if the global flag was actually set -func (c *Context) GlobalIsSet(name string) bool { - if c.globalSetFlags == nil { - c.globalSetFlags = make(map[string]bool) - c.globalSet.Visit(func(f *flag.Flag) { - c.globalSetFlags[f.Name] = true - }) - } - return c.globalSetFlags[name] == true -} - -// Returns a slice of flag names used in this context. -func (c *Context) FlagNames() (names []string) { - for _, flag := range c.Command.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" { - continue - } - names = append(names, name) - } - return -} - -// Returns a slice of global flag names used by the app. -func (c *Context) GlobalFlagNames() (names []string) { - for _, flag := range c.App.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" || name == "version" { - continue - } - names = append(names, name) - } - return -} - -type Args []string - -// Returns the command line arguments associated with the context. -func (c *Context) Args() Args { - args := Args(c.flagSet.Args()) - return args -} - -// Returns the nth argument, or else a blank string -func (a Args) Get(n int) string { - if len(a) > n { - return a[n] - } - return "" -} - -// Returns the first argument, or else a blank string -func (a Args) First() string { - return a.Get(0) -} - -// Return the rest of the arguments (not the first one) -// or else an empty string slice -func (a Args) Tail() []string { - if len(a) >= 2 { - return []string(a)[1:] - } - return []string{} -} - -// Checks if there are any arguments present -func (a Args) Present() bool { - return len(a) != 0 -} - -// Swaps arguments at the given indexes -func (a Args) Swap(from, to int) error { - if from >= len(a) || to >= len(a) { - return errors.New("index out of range") - } - a[from], a[to] = a[to], a[from] - return nil -} - -func lookupInt(name string, set *flag.FlagSet) int { - f := set.Lookup(name) - if f != nil { - val, err := strconv.Atoi(f.Value.String()) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - val, err := time.ParseDuration(f.Value.String()) - if err == nil { - return val - } - } - - return 0 -} - -func lookupFloat64(name string, set *flag.FlagSet) float64 { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseFloat(f.Value.String(), 64) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupString(name string, set *flag.FlagSet) string { - f := set.Lookup(name) - if f != nil { - return f.Value.String() - } - - return "" -} - -func lookupStringSlice(name string, set *flag.FlagSet) []string { - f := set.Lookup(name) - if f != nil { - return (f.Value.(*StringSlice)).Value() - - } - - return nil -} - -func lookupIntSlice(name string, set *flag.FlagSet) []int { - f := set.Lookup(name) - if f != nil { - return (f.Value.(*IntSlice)).Value() - - } - - return nil -} - -func lookupGeneric(name string, set *flag.FlagSet) interface{} { - f := set.Lookup(name) - if f != nil { - return f.Value - } - return nil -} - -func lookupBool(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := 
strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return val - } - - return false -} - -func lookupBoolT(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return true - } - return val - } - - return false -} - -func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { - switch ff.Value.(type) { - case *StringSlice: - default: - set.Set(name, ff.Value.String()) - } -} - -func normalizeFlags(flags []Flag, set *flag.FlagSet) error { - visited := make(map[string]bool) - set.Visit(func(f *flag.Flag) { - visited[f.Name] = true - }) - for _, f := range flags { - parts := strings.Split(f.getName(), ",") - if len(parts) == 1 { - continue - } - var ff *flag.Flag - for _, name := range parts { - name = strings.Trim(name, " ") - if visited[name] { - if ff != nil { - return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) - } - ff = set.Lookup(name) - } - } - if ff == nil { - continue - } - for _, name := range parts { - name = strings.Trim(name, " ") - if !visited[name] { - copyFlag(name, ff, set) - } - } - } - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go deleted file mode 100644 index 7c9a4436..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package cli_test - -import ( - "flag" - "testing" - "time" - - "github.com/codegangsta/cli" -) - -func TestNewContext(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Int("myflag", 42, "doc") - command := cli.Command{Name: "mycommand"} - c := cli.NewContext(nil, set, globalSet) - c.Command = command - expect(t, c.Int("myflag"), 12) - expect(t, c.GlobalInt("myflag"), 42) - expect(t, c.Command.Name, "mycommand") -} - -func TestContext_Int(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Int("myflag"), 12) -} - -func TestContext_Duration(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Duration("myflag", time.Duration(12*time.Second), "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) -} - -func TestContext_String(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.String("myflag", "hello world", "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.String("myflag"), "hello world") -} - -func TestContext_Bool(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Bool("myflag"), false) -} - -func TestContext_BoolT(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", true, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.BoolT("myflag"), true) -} - -func TestContext_Args(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) - set.Parse([]string{"--myflag", "bat", "baz"}) - expect(t, len(c.Args()), 2) - expect(t, c.Bool("myflag"), true) -} - -func TestContext_IsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - 
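
IsSet, whose behaviour the surrounding tests pin down, reports only flags the user actually passed; a default value alone leaves it false, and the local and global flag sets are tracked independently. A short sketch of the practical upshot (the "config" flag and its default path are hypothetical):

    package main

    import (
        "fmt"
        "os"

        "github.com/codegangsta/cli"
    )

    func main() {
        app := cli.NewApp()
        app.Name = "demo"
        app.Flags = []cli.Flag{
            cli.StringFlag{Name: "config", Value: "/etc/demo.conf"},
        }
        app.Action = func(c *cli.Context) {
            if c.IsSet("config") {
                fmt.Println("using user-supplied config:", c.String("config"))
            } else {
                // Default values do not mark a flag as set.
                fmt.Println("using built-in default:", c.String("config"))
            }
        }
        app.Run(os.Args)
    }
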
globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - c := cli.NewContext(nil, set, globalSet) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.IsSet("myflag"), true) - expect(t, c.IsSet("otherflag"), false) - expect(t, c.IsSet("bogusflag"), false) - expect(t, c.IsSet("myflagGlobal"), false) -} - -func TestContext_GlobalIsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalSet.Bool("myflagGlobalUnset", true, "doc") - c := cli.NewContext(nil, set, globalSet) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.GlobalIsSet("myflag"), false) - expect(t, c.GlobalIsSet("otherflag"), false) - expect(t, c.GlobalIsSet("bogusflag"), false) - expect(t, c.GlobalIsSet("myflagGlobal"), true) - expect(t, c.GlobalIsSet("myflagGlobalUnset"), false) - expect(t, c.GlobalIsSet("bogusGlobal"), false) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go deleted file mode 100644 index 25115866..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go +++ /dev/null @@ -1,454 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "os" - "strconv" - "strings" - "time" -) - -// This flag enables bash-completion for all commands and subcommands -var BashCompletionFlag = BoolFlag{ - Name: "generate-bash-completion", -} - -// This flag prints the version for the application -var VersionFlag = BoolFlag{ - Name: "version, v", - Usage: "print the version", -} - -// This flag prints the help for all commands and subcommands -// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand -// unless HideHelp is set to true) -var HelpFlag = BoolFlag{ - Name: "help, h", - Usage: "show help", -} - -// Flag is a common interface related to parsing flags in cli. -// For more advanced flag parsing techniques, it is recomended that -// this interface be implemented. 
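
As the comment on HelpFlag notes, the built-in help flag is an ordinary package variable: it can be swapped for a different definition, or disabled with the zero value, before the app runs. A sketch:

    package main

    import "github.com/codegangsta/cli"

    func main() {
        // Keep --help but drop the -h alias:
        cli.HelpFlag = cli.BoolFlag{Name: "help", Usage: "show help"}
        // Or disable the built-in flag entirely:
        // cli.HelpFlag = cli.BoolFlag{}

        app := cli.NewApp()
        app.Run([]string{"demo", "--help"})
    }
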
-type Flag interface { - fmt.Stringer - // Apply Flag settings to the given flag set - Apply(*flag.FlagSet) - getName() string -} - -func flagSet(name string, flags []Flag) *flag.FlagSet { - set := flag.NewFlagSet(name, flag.ContinueOnError) - - for _, f := range flags { - f.Apply(set) - } - return set -} - -func eachName(longName string, fn func(string)) { - parts := strings.Split(longName, ",") - for _, name := range parts { - name = strings.Trim(name, " ") - fn(name) - } -} - -// Generic is a generic parseable type identified by a specific flag -type Generic interface { - Set(value string) error - String() string -} - -// GenericFlag is the flag type for types implementing Generic -type GenericFlag struct { - Name string - Value Generic - Usage string - EnvVar string -} - -// String returns the string representation of the generic flag to display the -// help text to the user (uses the String() method of the generic flag to show -// the value) -func (f GenericFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s \"%v\"\t%v", prefixFor(f.Name), f.Name, f.Value, f.Usage)) -} - -// Apply takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -func (f GenericFlag) Apply(set *flag.FlagSet) { - val := f.Value - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - val.Set(envVal) - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f GenericFlag) getName() string { - return f.Name -} - -type StringSlice []string - -func (f *StringSlice) Set(value string) error { - *f = append(*f, value) - return nil -} - -func (f *StringSlice) String() string { - return fmt.Sprintf("%s", *f) -} - -func (f *StringSlice) Value() []string { - return *f -} - -type StringSliceFlag struct { - Name string - Value *StringSlice - Usage string - EnvVar string -} - -func (f StringSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) -} - -func (f StringSliceFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - newVal.Set(s) - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f StringSliceFlag) getName() string { - return f.Name -} - -type IntSlice []int - -func (f *IntSlice) Set(value string) error { - - tmp, err := strconv.Atoi(value) - if err != nil { - return err - } else { - *f = append(*f, tmp) - } - return nil -} - -func (f *IntSlice) String() string { - return fmt.Sprintf("%d", *f) -} - -func (f *IntSlice) Value() []int { - return *f -} - -type IntSliceFlag struct { - Name string - Value *IntSlice - Usage string - EnvVar string -} - -func (f IntSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) -} - -func (f IntSliceFlag) 
Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - } - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f IntSliceFlag) getName() string { - return f.Name -} - -type BoolFlag struct { - Name string - Usage string - EnvVar string -} - -func (f BoolFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) -} - -func (f BoolFlag) Apply(set *flag.FlagSet) { - val := false - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - } - break - } - } - } - - eachName(f.Name, func(name string) { - set.Bool(name, val, f.Usage) - }) -} - -func (f BoolFlag) getName() string { - return f.Name -} - -type BoolTFlag struct { - Name string - Usage string - EnvVar string -} - -func (f BoolTFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) -} - -func (f BoolTFlag) Apply(set *flag.FlagSet) { - val := true - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Bool(name, val, f.Usage) - }) -} - -func (f BoolTFlag) getName() string { - return f.Name -} - -type StringFlag struct { - Name string - Value string - Usage string - EnvVar string -} - -func (f StringFlag) String() string { - var fmtString string - fmtString = "%s %v\t%v" - - if len(f.Value) > 0 { - fmtString = "%s \"%v\"\t%v" - } else { - fmtString = "%s %v\t%v" - } - - return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f StringFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - f.Value = envVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.String(name, f.Value, f.Usage) - }) -} - -func (f StringFlag) getName() string { - return f.Name -} - -type IntFlag struct { - Name string - Value int - Usage string - EnvVar string -} - -func (f IntFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f IntFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err == nil { - f.Value = int(envValInt) - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Int(name, f.Value, f.Usage) - }) -} - -func (f IntFlag) getName() string { - return f.Name -} - -type DurationFlag struct { - Name string - Value time.Duration - Usage string - EnvVar string -} - -func (f DurationFlag) String() string { - return 
withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f DurationFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValDuration, err := time.ParseDuration(envVal) - if err == nil { - f.Value = envValDuration - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Duration(name, f.Value, f.Usage) - }) -} - -func (f DurationFlag) getName() string { - return f.Name -} - -type Float64Flag struct { - Name string - Value float64 - Usage string - EnvVar string -} - -func (f Float64Flag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f Float64Flag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err == nil { - f.Value = float64(envValFloat) - } - } - } - } - - eachName(f.Name, func(name string) { - set.Float64(name, f.Value, f.Usage) - }) -} - -func (f Float64Flag) getName() string { - return f.Name -} - -func prefixFor(name string) (prefix string) { - if len(name) == 1 { - prefix = "-" - } else { - prefix = "--" - } - - return -} - -func prefixedNames(fullName string) (prefixed string) { - parts := strings.Split(fullName, ",") - for i, name := range parts { - name = strings.Trim(name, " ") - prefixed += prefixFor(name) + name - if i < len(parts)-1 { - prefixed += ", " - } - } - return -} - -func withEnvHint(envVar, str string) string { - envText := "" - if envVar != "" { - envText = fmt.Sprintf(" [$%s]", strings.Join(strings.Split(envVar, ","), ", $")) - } - return str + envText -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go deleted file mode 100644 index f0f096a2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go +++ /dev/null @@ -1,742 +0,0 @@ -package cli_test - -import ( - "fmt" - "os" - "reflect" - "strings" - "testing" - - "github.com/codegangsta/cli" -) - -var boolFlagTests = []struct { - name string - expected string -}{ - {"help", "--help\t"}, - {"h", "-h\t"}, -} - -func TestBoolFlagHelpOutput(t *testing.T) { - - for _, test := range boolFlagTests { - flag := cli.BoolFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -var stringFlagTests = []struct { - name string - value string - expected string -}{ - {"help", "", "--help \t"}, - {"h", "", "-h \t"}, - {"h", "", "-h \t"}, - {"test", "Something", "--test \"Something\"\t"}, -} - -func TestStringFlagHelpOutput(t *testing.T) { - - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "derp") - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} - output := flag.String() - - if 
!strings.HasSuffix(output, " [$APP_FOO]") { - t.Errorf("%s does not end with [$APP_FOO]", output) - } - } -} - -var stringSliceFlagTests = []struct { - name string - value *cli.StringSlice - expected string -}{ - {"help", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "--help [--help option --help option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"test", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("Something") - return s - }(), "--test [--test option --test option]\t"}, -} - -func TestStringSliceFlagHelpOutput(t *testing.T) { - - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_QWWX", "11,4") - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_QWWX]") { - t.Errorf("%q does not end with [$APP_QWWX]", output) - } - } -} - -var intFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestIntFlagHelpOutput(t *testing.T) { - - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2") - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var durationFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestDurationFlagHelpOutput(t *testing.T) { - - for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2h3m6s") - for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var intSliceFlagTests = []struct { - name string - value *cli.IntSlice - expected string -}{ - {"help", &cli.IntSlice{}, "--help [--help option --help option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"test", func() *cli.IntSlice { - i := &cli.IntSlice{} - i.Set("9") - return i - }(), "--test [--test option --test option]\t"}, -} - -func TestIntSliceFlagHelpOutput(t *testing.T) { - - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match 
%q", output, test.expected) - } - } -} - -func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_SMURF", "42,3") - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_SMURF]") { - t.Errorf("%q does not end with [$APP_SMURF]", output) - } - } -} - -var float64FlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestFloat64FlagHelpOutput(t *testing.T) { - - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAZ", "99.4") - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAZ]") { - t.Errorf("%s does not end with [$APP_BAZ]", output) - } - } -} - -var genericFlagTests = []struct { - name string - value cli.Generic - expected string -}{ - {"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"}, - {"t", &Parser{"abc", "def"}, "-t \"abc,def\"\ttest flag"}, -} - -func TestGenericFlagHelpOutput(t *testing.T) { - - for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_ZAP", "3") - for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_ZAP]") { - t.Errorf("%s does not end with [$APP_ZAP]", output) - } - } -} - -func TestParseMultiString(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("serve") != "10" { - t.Errorf("main name not set") - } - if ctx.String("s") != "10" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiStringFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { - t.Errorf("main name not set") - } - if 
!reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiStringSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiInt(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("serve") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("s") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiIntFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "serve, s", Value: &cli.IntSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiIntSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiIntSliceFromEnvCascade(t 
*testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiFloat64(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("serve") != 10.2 { - t.Errorf("main name not set") - } - if ctx.Float64("s") != 10.2 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10.2"}) -} - -func TestParseMultiFloat64FromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiFloat64FromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBool(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("serve") != true { - t.Errorf("main name not set") - } - if ctx.Bool("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolT(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("serve") != true { - t.Errorf("main name not set") - } - if ctx.BoolT("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolTFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if 
ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolTFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -type Parser [2]string - -func (p *Parser) Set(value string) error { - parts := strings.Split(value, ",") - if len(parts) != 2 { - return fmt.Errorf("invalid format") - } - - (*p)[0] = parts[0] - (*p)[1] = parts[1] - - return nil -} - -func (p *Parser) String() string { - return fmt.Sprintf("%s,%s", p[0], p[1]) -} - -func TestParseGeneric(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10,20"}) -} - -func TestParseGenericFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_SERVE", "20,30") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseGenericFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "99,2000") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) { - t.Errorf("value not set from env") - } - }, - } - a.Run([]string{"run"}) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/help.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/help.go deleted file mode 100644 index bfb27885..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/help.go +++ /dev/null @@ -1,211 +0,0 @@ -package cli - -import "fmt" - -// The text template for the Default help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var AppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...] - -VERSION: - {{.Version}}{{if or .Author .Email}} - -AUTHOR:{{if .Author}} - {{.Author}}{{if .Email}} - <{{.Email}}>{{end}}{{else}} - {{.Email}}{{end}}{{end}} - -COMMANDS: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} -GLOBAL OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}} -` - -// The text template for the command help topic. -// cli.go uses text/template to render templates. 
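
These help templates are plain package variables rendered with text/template, so an application can swap in its own layout before calling Run; any exported field of *cli.App is visible to the template. A minimal sketch:

    package main

    import (
        "os"

        "github.com/codegangsta/cli"
    )

    func main() {
        cli.AppHelpTemplate = `{{.Name}} - {{.Usage}}

    commands:
       {{range .Commands}}{{.Name}}{{ "\t" }}{{.Usage}}
       {{end}}`

        app := cli.NewApp()
        app.Name = "demo"
        app.Usage = "show a custom help layout"
        app.Run(os.Args)
    }
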
You can -// render custom help text by setting this variable. -var CommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - command {{.Name}}{{if .Flags}} [command options]{{end}} [arguments...]{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if .Flags}} - -OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{ end }} -` - -// The text template for the subcommand help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var SubcommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{.Name}} command{{if .Flags}} [command options]{{end}} [arguments...] - -COMMANDS: - {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} -OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}} -` - -var helpCommand = Command{ - Name: "help", - ShortName: "h", - Usage: "Shows a list of commands or help for one command", - Action: func(c *Context) { - args := c.Args() - if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowAppHelp(c) - } - }, -} - -var helpSubcommand = Command{ - Name: "help", - ShortName: "h", - Usage: "Shows a list of commands or help for one command", - Action: func(c *Context) { - args := c.Args() - if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowSubcommandHelp(c) - } - }, -} - -// Prints help for the App -type helpPrinter func(templ string, data interface{}) - -var HelpPrinter helpPrinter = nil - -// Prints version for the App -var VersionPrinter = printVersion - -func ShowAppHelp(c *Context) { - HelpPrinter(AppHelpTemplate, c.App) -} - -// Prints the list of subcommands as the default app completion method -func DefaultAppComplete(c *Context) { - for _, command := range c.App.Commands { - fmt.Fprintln(c.App.Writer, command.Name) - if command.ShortName != "" { - fmt.Fprintln(c.App.Writer, command.ShortName) - } - } -} - -// Prints help for the given command -func ShowCommandHelp(c *Context, command string) { - for _, c := range c.App.Commands { - if c.HasName(command) { - HelpPrinter(CommandHelpTemplate, c) - return - } - } - - if c.App.CommandNotFound != nil { - c.App.CommandNotFound(c, command) - } else { - fmt.Fprintf(c.App.Writer, "No help topic for '%v'\n", command) - } -} - -// Prints help for the given subcommand -func ShowSubcommandHelp(c *Context) { - ShowCommandHelp(c, c.Command.Name) -} - -// Prints the version number of the App -func ShowVersion(c *Context) { - VersionPrinter(c) -} - -func printVersion(c *Context) { - fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) -} - -// Prints the lists of commands within a given context -func ShowCompletions(c *Context) { - a := c.App - if a != nil && a.BashComplete != nil { - a.BashComplete(c) - } -} - -// Prints the custom completions for a given command -func ShowCommandCompletions(ctx *Context, command string) { - c := ctx.App.Command(command) - if c != nil && c.BashComplete != nil { - c.BashComplete(ctx) - } -} - -func checkVersion(c *Context) bool { - if c.GlobalBool("version") { - ShowVersion(c) - return true - } - - return false -} - -func checkHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - ShowAppHelp(c) - return true - } - - return false -} - -func checkCommandHelp(c *Context, name string) bool { - if c.Bool("h") || c.Bool("help") { - ShowCommandHelp(c, name) - return true - } - - return false -} - -func checkSubcommandHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - 
ShowSubcommandHelp(c) - return true - } - - return false -} - -func checkCompletions(c *Context) bool { - if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { - ShowCompletions(c) - return true - } - - return false -} - -func checkCommandCompletions(c *Context, name string) bool { - if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { - ShowCommandCompletions(c, name) - return true - } - - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go deleted file mode 100644 index cdc4feb2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package cli_test - -import ( - "reflect" - "testing" -) - -/* Test Helpers */ -func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { - t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { - t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go deleted file mode 100644 index 17b2d3ce..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go +++ /dev/null @@ -1,1265 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/base64" - "encoding/xml" - "fmt" - "github.com/denverdino/aliyungo/util" - "io" - "io/ioutil" - "log" - "mime" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path" - "strconv" - "strings" - "time" -) - -const DefaultContentType = "application/octet-stream" - -// The Client type encapsulates operations with an OSS region. -type Client struct { - AccessKeyId string - AccessKeySecret string - Region Region - Internal bool - Secure bool - ConnectTimeout time.Duration - ReadTimeout time.Duration - - endpoint string - debug bool -} - -// The Bucket type encapsulates operations with an bucket. -type Bucket struct { - *Client - Name string -} - -// The Owner type represents the owner of the object in an bucket. -type Owner struct { - ID string - DisplayName string -} - -// Options struct -// -type Options struct { - ServerSideEncryption bool - Meta map[string][]string - ContentEncoding string - CacheControl string - ContentMD5 string - ContentDisposition string - //Range string - //Expires int -} - -type CopyOptions struct { - Headers http.Header - CopySourceOptions string - MetadataDirective string - //ContentType string -} - -// CopyObjectResult is the output from a Copy request -type CopyObjectResult struct { - ETag string - LastModified string -} - -var attempts = util.AttemptStrategy{ - Min: 5, - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, -} - -// NewOSSClient creates a new OSS. 
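
A sketch of typical construction with the client documented here (the credentials and region value are placeholders, and Region is assumed to be a string-typed region identifier, as the location-constraint formatting suggests):

    package main

    import (
        "fmt"

        "github.com/denverdino/aliyungo/oss"
    )

    func main() {
        // internal=false, secure=true: the public endpoint over HTTPS.
        client := oss.NewOSSClient(oss.Region("oss-cn-hangzhou"), false,
            "my-access-key-id", "my-access-key-secret", true)
        client.SetDebug(false)

        bucket := client.Bucket("Demo-Bucket") // Bucket lower-cases the name
        fmt.Println("bucket handle:", bucket.Name)
    }
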
- -func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client { - return &Client{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - Region: region, - Internal: internal, - debug: false, - Secure: secure, - } -} - -// SetDebug sets debug mode to log the request/response message -func (client *Client) SetDebug(debug bool) { - client.debug = debug -} - -// Bucket returns a Bucket with the given name. -func (client *Client) Bucket(name string) *Bucket { - name = strings.ToLower(name) - return &Bucket{ - Client: client, - Name: name, - } -} - -type BucketInfo struct { - Name string - CreationDate string -} - -type GetServiceResp struct { - Owner Owner - Buckets []BucketInfo `xml:">Bucket"` -} - -// GetService gets a list of all buckets owned by an account. -func (client *Client) GetService() (*GetServiceResp, error) { - bucket := client.Bucket("") - - r, err := bucket.Get("") - if err != nil { - return nil, err - } - - // Parse the XML response. - var resp GetServiceResp - if err = xml.Unmarshal(r, &resp); err != nil { - return nil, err - } - - return &resp, nil -} - -type ACL string - -const ( - Private = ACL("private") - PublicRead = ACL("public-read") - PublicReadWrite = ACL("public-read-write") - AuthenticatedRead = ACL("authenticated-read") - BucketOwnerRead = ACL("bucket-owner-read") - BucketOwnerFull = ACL("bucket-owner-full-control") -) - -var createBucketConfiguration = ` - %s -` - -// locationConstraint returns an io.Reader specifying a LocationConstraint if -// required for the region. -func (client *Client) locationConstraint() io.Reader { - constraint := fmt.Sprintf(createBucketConfiguration, client.Region) - return strings.NewReader(constraint) -} - -// override default endpoint -func (client *Client) SetEndpoint(endpoint string) { - // TODO check endpoint - client.endpoint = endpoint -} - -// PutBucket creates a new bucket. -func (b *Bucket) PutBucket(perm ACL) error { - headers := make(http.Header) - if perm != "" { - headers.Set("x-oss-acl", string(perm)) - } - req := &request{ - method: "PUT", - bucket: b.Name, - path: "/", - headers: headers, - payload: b.Client.locationConstraint(), - } - return b.Client.query(req, nil) -} - -// DelBucket removes an existing bucket. All objects in the bucket must -// be removed before the bucket itself can be removed. -func (b *Bucket) DelBucket() (err error) { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.Client.query(req, nil) - if !shouldRetry(err) { - break - } - } - return err -} - -// Get retrieves an object from an bucket. -func (b *Bucket) Get(path string) (data []byte, err error) { - body, err := b.GetReader(path) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(body) - body.Close() - return data, err -} - -// GetReader retrieves an object from an bucket, -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { - resp, err := b.GetResponse(path) - if resp != nil { - return resp.Body, err - } - return nil, err -} - -// GetResponse retrieves an object from an bucket, -// returning the HTTP response. 
-// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { - return b.GetResponseWithHeaders(path, make(http.Header)) -} - -// GetResponseWithHeaders retrieves an object from an bucket -// Accepts custom headers to be sent as the second parameter -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) { - req := &request{ - bucket: b.Name, - path: path, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Get retrieves an object from an bucket. -func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err error) { - resp, err := b.GetResponseWithParamsAndHeaders(path, params, nil) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - return data, err -} - -func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) { - req := &request{ - bucket: b.Name, - path: path, - params: params, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Exists checks whether or not an object exists on an bucket using a HEAD request. -func (b *Bucket) Exists(path string) (exists bool, err error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - } - err = b.Client.prepare(req) - if err != nil { - return - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.Client.run(req, nil) - - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - // We can treat a 403 or 404 as non existance - if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { - return false, nil - } - return false, err - } - - if resp.StatusCode/100 == 2 { - exists = true - } - if resp.Body != nil { - resp.Body.Close() - } - return exists, err - } - return false, fmt.Errorf("OSS Currently Unreachable") -} - -// Head HEADs an object in the bucket, returns the response with -func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - headers: headers, - } - err := b.Client.prepare(req) - if err != nil { - return nil, err - } - - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, err - } - return nil, fmt.Errorf("OSS Currently Unreachable") -} - -// Put inserts an object into the bucket. 
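
Get above and Put below make up the usual round trip. A sketch (the bucket name and object key are placeholders; the zero Options value adds no optional headers):

    package main

    import (
        "fmt"
        "log"

        "github.com/denverdino/aliyungo/oss"
    )

    func main() {
        client := oss.NewOSSClient(oss.Region("oss-cn-hangzhou"), false,
            "my-access-key-id", "my-access-key-secret", true)
        bucket := client.Bucket("demo-bucket")

        // Upload a small text object with a private ACL.
        err := bucket.Put("notes/hello.txt", []byte("hello oss"),
            "text/plain", oss.Private, oss.Options{})
        if err != nil {
            log.Fatal(err)
        }

        // Read it back.
        data, err := bucket.Get("notes/hello.txt")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(data))
    }
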
-func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { - body := bytes.NewBuffer(data) - return b.PutReader(path, body, int64(len(data)), contType, perm, options) -} - -// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key -func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { - headers := make(http.Header) - - headers.Set("x-oss-acl", string(perm)) - headers.Set("x-oss-copy-source", source) - - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - } - resp := &CopyObjectResult{} - err := b.Client.query(req, resp) - if err != nil { - return resp, err - } - return resp, nil -} - -// PutReader inserts an object into the bucket by consuming data -// from r until EOF. -func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(length, 10)) - headers.Set("Content-Type", contType) - headers.Set("x-oss-acl", string(perm)) - - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.Client.query(req, nil) -} - -// PutFile creates/updates object with file -func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) error { - var contentType string - if dotPos := strings.LastIndex(file.Name(), "."); dotPos == -1 { - contentType = DefaultContentType - } else { - if mimeType := mime.TypeByExtension(file.Name()[dotPos:]); mimeType == "" { - contentType = DefaultContentType - } else { - contentType = mimeType - } - } - stats, err := file.Stat() - if err != nil { - log.Panicf("Unable to read file %s stats.", file.Name()) - return nil - } - - return b.PutReader(path, file, stats.Size(), contentType, perm, options) -} - -// addHeaders adds o's specified fields to headers -func (o Options) addHeaders(headers http.Header) { - if o.ServerSideEncryption { - headers.Set("x-oss-server-side-encryption", "AES256") - } - if len(o.ContentEncoding) != 0 { - headers.Set("Content-Encoding", o.ContentEncoding) - } - if len(o.CacheControl) != 0 { - headers.Set("Cache-Control", o.CacheControl) - } - if len(o.ContentMD5) != 0 { - headers.Set("Content-MD5", o.ContentMD5) - } - if len(o.ContentDisposition) != 0 { - headers.Set("Content-Disposition", o.ContentDisposition) - } - - for k, v := range o.Meta { - for _, mv := range v { - headers.Add("x-oss-meta-"+k, mv) - } - } -} - -// addHeaders adds o's specified fields to headers -func (o CopyOptions) addHeaders(headers http.Header) { - if len(o.MetadataDirective) != 0 { - headers.Set("x-oss-metadata-directive", o.MetadataDirective) - } - if len(o.CopySourceOptions) != 0 { - headers.Set("x-oss-copy-source-range", o.CopySourceOptions) - } - if o.Headers != nil { - for k, v := range o.Headers { - newSlice := make([]string, len(v)) - copy(newSlice, v) - headers[k] = newSlice - } - } -} - -func makeXMLBuffer(doc []byte) *bytes.Buffer { - buf := new(bytes.Buffer) - buf.WriteString(xml.Header) - buf.Write(doc) - return buf -} - -type IndexDocument struct { - Suffix string `xml:"Suffix"` -} - -type ErrorDocument struct { - Key string `xml:"Key"` -} - -type RoutingRule struct { - ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` - RedirectReplaceKeyPrefixWith string 
`xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` - RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` -} - -type RedirectAllRequestsTo struct { - HostName string `xml:"HostName"` - Protocol string `xml:"Protocol,omitempty"` -} - -type WebsiteConfiguration struct { - XMLName xml.Name `xml:"http://doc.oss-cn-hangzhou.aliyuncs.com WebsiteConfiguration"` - IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` - ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` - RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` - RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` -} - -// PutBucketWebsite configures a bucket as a website. -func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { - doc, err := xml.Marshal(configuration) - if err != nil { - return err - } - - buf := makeXMLBuffer(doc) - - return b.PutBucketSubresource("website", buf, int64(buf.Len())) -} - -func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(length, 10)) - - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: r, - params: url.Values{subresource: {""}}, - } - - return b.Client.query(req, nil) -} - -// Del removes an object from the bucket. -func (b *Bucket) Del(path string) error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: path, - } - return b.Client.query(req, nil) -} - -type Delete struct { - Quiet bool `xml:"Quiet,omitempty"` - Objects []Object `xml:"Object"` -} - -type Object struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` -} - -// DelMulti removes up to 1000 objects from the bucket. -func (b *Bucket) DelMulti(objects Delete) error { - doc, err := xml.Marshal(objects) - if err != nil { - return err - } - - buf := makeXMLBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(int64(size), 10)) - headers.Set("Content-MD5", base64.StdEncoding.EncodeToString(digest.Sum(nil))) - headers.Set("Content-Type", "text/xml") - - req := &request{ - path: "/", - method: "POST", - params: url.Values{"delete": {""}}, - bucket: b.Name, - headers: headers, - payload: buf, - } - - return b.Client.query(req, nil) -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` - // if IsTruncated is true, pass NextMarker as marker argument to List() - // to get the next set of keys - NextMarker string -} - -// The Key type represents an item stored in a bucket. -type Key struct { - Key string - LastModified string - Type string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in a bucket. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix.
-// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. OSS lists objects in alphabetical order and -// will return keys alphabetically greater than the marker. -// -// The max parameter specifies how many keys + common prefixes to return in -// the response. The default is 1000. -// -// For example, given these keys in a bucket: -// -// index.html -// index2.html -// photos/2006/January/sample.jpg -// photos/2006/February/sample2.jpg -// photos/2006/February/sample3.jpg -// photos/2006/February/sample4.jpg -// -// Listing this bucket with delimiter set to "/" would yield the -// following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Contents: []Key{ -// {Key: "index.html"}, -// {Key: "index2.html"}, -// }, -// CommonPrefixes: []string{ -// "photos/", -// }, -// } -// -// Listing the same bucket with delimiter set to "/" and prefix set to -// "photos/2006/" would yield the following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Prefix: "photos/2006/", -// CommonPrefixes: []string{ -// "photos/2006/February/", -// "photos/2006/January/", -// }, -// } -// -func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { - params := make(url.Values) - params.Set("prefix", prefix) - params.Set("delimiter", delim) - params.Set("marker", marker) - if max != 0 { - params.Set("max-keys", strconv.FormatInt(int64(max), 10)) - } - req := &request{ - bucket: b.Name, - params: params, - } - result = &ListResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = b.Client.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - // if NextMarker is not returned, it should be set to the name of the last key, - // so let's do it so that each caller doesn't have to - if result.IsTruncated && result.NextMarker == "" { - n := len(result.Contents) - if n > 0 { - result.NextMarker = result.Contents[n-1].Key - } - } - return result, nil -} - -//// The VersionsResp type holds the results of a list bucket Versions operation. -//type VersionsResp struct { -// Name string -// Prefix string -// KeyMarker string -// VersionIdMarker string -// MaxKeys int -// Delimiter string -// IsTruncated bool -// Versions []Version `xml:"Version"` -// CommonPrefixes []string `xml:">Prefix"` -//} - -//// The Version type represents an object version stored in a bucket. -//type Version struct { -// Key string -// VersionId string -// IsLatest bool -// LastModified string -// // ETag gives the hex-encoded MD5 sum of the contents, -// // surrounded with double-quotes.
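A sketch of paging through a listing with List and NextMarker, following the truncation contract documented above; the bucket name, prefix, and credentials are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	client := oss.NewOSSClient(oss.Hangzhou, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true) // hypothetical credentials
	b := client.Bucket("my-bucket")

	marker := ""
	for {
		res, err := b.List("photos/", "/", marker, 1000)
		if err != nil {
			log.Fatal(err)
		}
		for _, key := range res.Contents {
			fmt.Println(key.Key, key.Size)
		}
		if !res.IsTruncated {
			break
		}
		marker = res.NextMarker // List fills this in even when the server omits it
	}
}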
-// ETag string -// Size int64 -// Owner Owner -// StorageClass string -//} - -//func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { -// params := url.Values{} -// params.Set("versions", "") -// params.Set("prefix", prefix) -// params.Set("delimiter", delim) - -// if len(versionIdMarker) != 0 { -// params["version-id-marker"] = []string{versionIdMarker} -// } -// if len(keyMarker) != 0 { -// params["key-marker"] = []string{keyMarker} -// } - -// if max != 0 { -// params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} -// } -// req := &request{ -// bucket: b.Name, -// params: params, -// } -// result = &VersionsResp{} -// for attempt := attempts.Start(); attempt.Next(); { -// err = b.Client.query(req, result) -// if !shouldRetry(err) { -// break -// } -// } -// if err != nil { -// return nil, err -// } -// return result, nil -//} - -type GetLocationResp struct { - Location string `xml:",innerxml"` -} - -func (b *Bucket) Location() (string, error) { - params := make(url.Values) - params.Set("location", "") - r, err := b.GetWithParams("/", params) - - if err != nil { - return "", err - } - - // Parse the XML response. - var resp GetLocationResp - if err = xml.Unmarshal(r, &resp); err != nil { - return "", err - } - - if resp.Location == "" { - return string(Hangzhou), nil - } - return resp.Location, nil -} - -func (b *Bucket) Path(path string) string { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return "/" + b.Name + path -} - -// URL returns a non-signed URL that allows retriving the -// object at path. It only works if the object is publicly -// readable (see SignedURL). -func (b *Bucket) URL(path string) string { - req := &request{ - bucket: b.Name, - path: path, - } - err := b.Client.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - u.RawQuery = "" - return u.String() -} - -// SignedURL returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. -func (b *Bucket) SignedURL(path string, expires time.Time) string { - return b.SignedURLWithArgs(path, expires, nil, nil) -} - -// SignedURLWithArgs returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. -func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string { - return b.SignedURLWithMethod("GET", path, expires, params, headers) -} - -// SignedURLWithMethod returns a signed URL that allows anyone holding the URL -// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. -func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { - var uv = url.Values{} - - if params != nil { - uv = params - } - - uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10)) - uv.Set("OSSAccessKeyId", b.AccessKeyId) - - req := &request{ - method: method, - bucket: b.Name, - path: path, - params: uv, - headers: headers, - } - err := b.Client.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - - return u.String() -} - -// UploadSignedURL returns a signed URL that allows anyone holding the URL -// to upload the object at path. The signature is valid until expires. 
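A sketch of generating a time-limited download link with the SignedURL helper above; the object path and expiry window are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	client := oss.NewOSSClient(oss.Hangzhou, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true) // hypothetical credentials
	b := client.Bucket("my-bucket")

	// Anyone holding this URL can GET the object until the expiry passes.
	u := b.SignedURL("private/report.pdf", time.Now().Add(15*time.Minute))
	fmt.Println(u)
}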
-// contentType is a string like image/png -// name is the resource name in OSS terminology like images/ali.png [obviously excluding the bucket name itself] -func (b *Bucket) UploadSignedURL(name, method, contentType string, expires time.Time) string { - //TODO TESTING - expireDate := expires.Unix() - if method != "POST" { - method = "PUT" - } - - tokenData := "" - - stringToSign := method + "\n\n" + contentType + "\n" + strconv.FormatInt(expireDate, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name) - secretKey := b.AccessKeySecret - accessId := b.AccessKeyId - mac := hmac.New(sha1.New, []byte(secretKey)) - mac.Write([]byte(stringToSign)) - macsum := mac.Sum(nil) - signature := base64.StdEncoding.EncodeToString([]byte(macsum)) - signature = strings.TrimSpace(signature) - - signedurl, err := url.Parse("https://" + b.Name + ".client.amazonaws.com/") - if err != nil { - log.Println("ERROR signing url for OSS upload", err) - return "" - } - signedurl.Path = name - params := url.Values{} - params.Add("OSSAccessKeyId", accessId) - params.Add("Expires", strconv.FormatInt(expireDate, 10)) - params.Add("Signature", signature) - - signedurl.RawQuery = params.Encode() - return signedurl.String() -} - -// PostFormArgsEx returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit. -// Additional conditions can be specified with conds. -func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { - conditions := []string{} - fields = map[string]string{ - "AWSAccessKeyId": b.AccessKeyId, - "key": path, - } - - if conds != nil { - conditions = append(conditions, conds...) - } - - conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) - conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) - if redirect != "" { - conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) - fields["success_action_redirect"] = redirect - } - - vExpiration := expires.Format("2006-01-02T15:04:05Z") - vConditions := strings.Join(conditions, ",") - policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) - policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) - fields["policy"] = policy64 - - signer := hmac.New(sha1.New, []byte(b.AccessKeySecret)) - signer.Write([]byte(policy64)) - fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) - - action = fmt.Sprintf("%s/%s/", b.Client.Region, b.Name) - return -} - -// PostFormArgs returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit -func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { - return b.PostFormArgsEx(path, expires, redirect, nil) -} - -type request struct { - method string - bucket string - path string - params url.Values - headers http.Header - baseurl string - payload io.Reader - prepared bool -} - -func (req *request) url() (*url.URL, error) { - u, err := url.Parse(req.baseurl) - if err != nil { - return nil, fmt.Errorf("bad OSS endpoint URL %q: %v", req.baseurl, err) - } - u.RawQuery = req.params.Encode() - u.Path = req.path - return u, nil -} - -// query prepares and runs the req request. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it.
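A sketch of preparing a browser upload form with the PostFormArgs helper above; the key, expiry, and redirect target are hypothetical, and the returned fields would become hidden inputs in an HTML form posting to action:

package main

import (
	"fmt"
	"time"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	client := oss.NewOSSClient(oss.Hangzhou, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true) // hypothetical credentials
	b := client.Bucket("my-bucket")

	// action is the form's POST target; fields become hidden <input> elements.
	action, fields := b.PostFormArgs("uploads/avatar.png", time.Now().Add(time.Hour), "https://example.com/done")
	fmt.Println("POST to:", action)
	for k, v := range fields {
		fmt.Printf("  %s=%s\n", k, v)
	}
}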
-func (client *Client) query(req *request, resp interface{}) error { - err := client.prepare(req) - if err != nil { - return err - } - r, err := client.run(req, resp) - if r != nil && r.Body != nil { - r.Body.Close() - } - return err -} - -// Sets baseurl on req from bucket name and the region endpoint -func (client *Client) setBaseURL(req *request) error { - - if client.endpoint == "" { - req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure) - } else { - req.baseurl = fmt.Sprintf("%s://%s", getProtocol(client.Secure), client.endpoint) - } - - return nil -} - -// partiallyEscapedPath partially escapes the OSS path allowing for all OSS REST API calls. -// -// Some commands including: -// GET Bucket acl http://goo.gl/aoXflF -// GET Bucket cors http://goo.gl/UlmBdx -// GET Bucket lifecycle http://goo.gl/8Fme7M -// GET Bucket policy http://goo.gl/ClXIo3 -// GET Bucket location http://goo.gl/5lh8RD -// GET Bucket Logging http://goo.gl/sZ5ckF -// GET Bucket notification http://goo.gl/qSSZKD -// GET Bucket tagging http://goo.gl/QRvxnM -// require the first character after the bucket name in the path to be a literal '?' and -// not the escaped hex representation '%3F'. -func partiallyEscapedPath(path string) string { - pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/") - if len(pathEscapedAndSplit) >= 3 { - if len(pathEscapedAndSplit[2]) >= 3 { - // Check for the one "?" that should not be escaped. - if pathEscapedAndSplit[2][0:3] == "%3F" { - pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:] - } - } - } - return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1) -} - -// prepare sets up req to be delivered to OSS. -func (client *Client) prepare(req *request) error { - // Copy so they can be mutated without affecting on retries. - headers := copyHeader(req.headers) - params := make(url.Values) - - for k, v := range req.params { - params[k] = v - } - - req.params = params - req.headers = headers - - if !req.prepared { - req.prepared = true - if req.method == "" { - req.method = "GET" - } - - if !strings.HasPrefix(req.path, "/") { - req.path = "/" + req.path - } - - err := client.setBaseURL(req) - if err != nil { - return err - } - } - - req.headers.Set("Date", util.GetGMTime()) - client.signRequest(req) - - return nil -} - -// Prepares an *http.Request for doHttpRequest -func (client *Client) setupHttpRequest(req *request) (*http.Request, error) { - // Copy so that signing the http request will not mutate it - - u, err := req.url() - if err != nil { - return nil, err - } - u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path)) - - hreq := http.Request{ - URL: u, - Method: req.method, - ProtoMajor: 1, - ProtoMinor: 1, - Close: true, - Header: req.headers, - Form: req.params, - } - - contentLength := req.headers.Get("Content-Length") - - if contentLength != "" { - hreq.ContentLength, _ = strconv.ParseInt(contentLength, 10, 64) - req.headers.Del("Content-Length") - } - - if req.payload != nil { - hreq.Body = ioutil.NopCloser(req.payload) - } - - return &hreq, nil -} - -// doHttpRequest sends hreq and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. 
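The escaping subtlety that partiallyEscapedPath above works around can be seen directly with net/url: a literal '?' placed in a URL path is percent-encoded on serialization, which the OSS subresource calls reject. A self-contained demonstration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u := url.URL{Path: "/my-bucket/?acl"}
	fmt.Println(u.String()) // prints /my-bucket/%3Facl; OSS requires the literal '?'
}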
-func (client *Client) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) { - c := http.Client{ - Transport: &http.Transport{ - Dial: func(netw, addr string) (c net.Conn, err error) { - deadline := time.Now().Add(client.ReadTimeout) - if client.ConnectTimeout > 0 { - c, err = net.DialTimeout(netw, addr, client.ConnectTimeout) - } else { - c, err = net.Dial(netw, addr) - } - if err != nil { - return - } - if client.ReadTimeout > 0 { - err = c.SetDeadline(deadline) - } - return - }, - Proxy: http.ProxyFromEnvironment, - }, - } - - hresp, err := c.Do(hreq) - if err != nil { - return nil, err - } - if client.debug { - log.Printf("%s %s %d\n", hreq.Method, hreq.URL.String(), hresp.StatusCode) - contentType := hresp.Header.Get("Content-Type") - if contentType == "application/xml" || contentType == "text/xml" { - dump, _ := httputil.DumpResponse(hresp, true) - log.Printf("} -> %s\n", dump) - } else { - log.Printf("Response Content-Type: %s\n", contentType) - } - } - if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { - return nil, client.buildError(hresp) - } - if resp != nil { - err = xml.NewDecoder(hresp.Body).Decode(resp) - hresp.Body.Close() - - if client.debug { - log.Printf("aliyungo.oss> decoded xml into %#v", resp) - } - - } - return hresp, err -} - -// run sends req and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (client *Client) run(req *request, resp interface{}) (*http.Response, error) { - if client.debug { - log.Printf("Running OSS request: %#v", req) - } - - hreq, err := client.setupHttpRequest(req) - if err != nil { - return nil, err - } - - return client.doHttpRequest(hreq, resp) -} - -// Error represents an error in an operation with OSS. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // OSS error code ("UnsupportedOperation", ...) - Message string // The human-oriented error message - BucketName string - RequestId string - HostId string -} - -func (e *Error) Error() string { - return e.Message -} - -func (client *Client) buildError(r *http.Response) error { - if client.debug { - log.Printf("got error (status code %v)", r.StatusCode) - data, err := ioutil.ReadAll(r.Body) - if err != nil { - log.Printf("\tread error: %v", err) - } else { - log.Printf("\tdata:\n%s\n\n", data) - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - } - - err := Error{} - // TODO return error if Unmarshal fails? - xml.NewDecoder(r.Body).Decode(&err) - r.Body.Close() - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - if client.debug { - log.Printf("err: %#v\n", err) - } - return &err -} - -func shouldRetry(err error) bool { - if err == nil { - return false - } - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "read", "write": - return true - } - case *url.Error: - // url.Error can be returned either by net/url if a URL cannot be - // parsed, or by net/http if the response is closed before the headers - // are received or parsed correctly. In that later case, e.Op is set to - // the HTTP method name with the first letter uppercased. We don't want - // to retry on POST operations, since those are not idempotent, all the - // other ones should be safe to retry. 
- switch e.Op { - case "Get", "Put", "Delete", "Head": - return shouldRetry(e.Err) - default: - return false - } - case *Error: - switch e.Code { - case "InternalError", "NoSuchUpload", "NoSuchBucket": - return true - } - } - return false -} - -func hasCode(err error, code string) bool { - e, ok := err.(*Error) - return ok && e.Code == code -} - -func copyHeader(header http.Header) (newHeader http.Header) { - newHeader = make(http.Header) - for k, v := range header { - newSlice := make([]string, len(v)) - copy(newSlice, v) - newHeader[k] = newSlice - } - return -} - -type AccessControlPolicy struct { - Owner Owner - Grants []string `xml:"AccessControlList>Grant"` -} - -// ACL returns ACL of bucket -func (b *Bucket) ACL() (result *AccessControlPolicy, err error) { - - params := make(url.Values) - params.Set("acl", "") - - r, err := b.GetWithParams("/", params) - if err != nil { - return nil, err - } - - // Parse the XML response. - var resp AccessControlPolicy - if err = xml.Unmarshal(r, &resp); err != nil { - return nil, err - } - - return &resp, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go deleted file mode 100644 index 13bc8768..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package oss_test - -import ( - "bytes" - "io/ioutil" - //"net/http" - "testing" - "time" - - "github.com/denverdino/aliyungo/oss" -) - -var ( - //If you test on ECS, you can set the internal param to true - client = oss.NewOSSClient(TestRegion, false, TestAccessKeyId, TestAccessKeySecret, false) -) - -func TestCreateBucket(t *testing.T) { - - b := client.Bucket(TestBucket) - err := b.PutBucket(oss.Private) - if err != nil { - t.Errorf("Failed for PutBucket: %v", err) - } - t.Log("Wait a while for bucket creation ...") - time.Sleep(10 * time.Second) -} - -func TestHead(t *testing.T) { - - b := client.Bucket(TestBucket) - _, err := b.Head("name", nil) - - if err == nil { - t.Errorf("Failed for Head: %v", err) - } -} - -func TestPutObject(t *testing.T) { - const DISPOSITION = "attachment; filename=\"0x1a2b3c.jpg\"" - - b := client.Bucket(TestBucket) - err := b.Put("name", []byte("content"), "content-type", oss.Private, oss.Options{ContentDisposition: DISPOSITION}) - if err != nil { - t.Errorf("Failed for Put: %v", err) - } -} - -func TestGet(t *testing.T) { - - b := client.Bucket(TestBucket) - data, err := b.Get("name") - - if err != nil || string(data) != "content" { - t.Errorf("Failed for Get: %v", err) - } -} - -func TestURL(t *testing.T) { - - b := client.Bucket(TestBucket) - url := b.URL("name") - - t.Log("URL: ", url) - // /c.Assert(req.URL.Path, check.Equals, "/denverdino_test/name") -} - -func TestGetReader(t *testing.T) { - - b := client.Bucket(TestBucket) - rc, err := b.GetReader("name") - if err != nil { - t.Fatalf("Failed for GetReader: %v", err) - } - data, err := ioutil.ReadAll(rc) - rc.Close() - if err != nil || string(data) != "content" { - t.Errorf("Failed for ReadAll: %v", err) - } -} - -func aTestGetNotFound(t *testing.T) { - - b := client.Bucket("non-existent-bucket") - _, err := b.Get("non-existent") - if err == nil { - t.Fatalf("Failed for TestGetNotFound: %v", err) - } - ossErr, _ := err.(*oss.Error) - if ossErr.StatusCode != 404 || ossErr.BucketName != "non-existent-bucket" { - t.Errorf("Failed 
for TestGetNotFound: %v", err) - } - -} - -func TestPutCopy(t *testing.T) { - b := client.Bucket(TestBucket) - t.Log("Source: ", b.Path("name")) - res, err := b.PutCopy("newname", oss.Private, oss.CopyOptions{}, - b.Path("name")) - if err == nil { - t.Logf("Copy result: %v", res) - } else { - t.Errorf("Failed for PutCopy: %v", err) - } -} - -func TestList(t *testing.T) { - - b := client.Bucket(TestBucket) - - data, err := b.List("n", "", "", 0) - if err != nil || len(data.Contents) != 2 { - t.Errorf("Failed for List: %v", err) - } else { - t.Logf("Contents = %++v", data) - } -} - -func TestListWithDelimiter(t *testing.T) { - - b := client.Bucket(TestBucket) - - data, err := b.List("photos/2006/", "/", "some-marker", 1000) - if err != nil || len(data.Contents) != 0 { - t.Errorf("Failed for List: %v", err) - } else { - t.Logf("Contents = %++v", data) - } - -} - -func TestPutReader(t *testing.T) { - - b := client.Bucket(TestBucket) - buf := bytes.NewBufferString("content") - err := b.PutReader("name", buf, int64(buf.Len()), "content-type", oss.Private, oss.Options{}) - if err != nil { - t.Errorf("Failed for PutReader: %v", err) - } - TestGetReader(t) -} - -func TestExists(t *testing.T) { - - b := client.Bucket(TestBucket) - result, err := b.Exists("name") - if err != nil || result != true { - t.Errorf("Failed for Exists: %v", err) - } -} - -func TestLocation(t *testing.T) { - b := client.Bucket(TestBucket) - result, err := b.Location() - - if err != nil || result != string(TestRegion) { - t.Errorf("Failed for Location: %v %s", err, result) - } -} - -func TestACL(t *testing.T) { - b := client.Bucket(TestBucket) - result, err := b.ACL() - - if err != nil { - t.Errorf("Failed for ACL: %v", err) - } else { - t.Logf("AccessControlPolicy: %++v", result) - } -} - -func TestDelObject(t *testing.T) { - - b := client.Bucket(TestBucket) - err := b.Del("name") - if err != nil { - t.Errorf("Failed for Del: %v", err) - } -} - -func TestDelMultiObjects(t *testing.T) { - - b := client.Bucket(TestBucket) - objects := []oss.Object{oss.Object{Key: "newname"}} - err := b.DelMulti(oss.Delete{ - Quiet: false, - Objects: objects, - }) - if err != nil { - t.Errorf("Failed for DelMulti: %v", err) - } -} - -func TestGetService(t *testing.T) { - bucketList, err := client.GetService() - if err != nil { - t.Errorf("Unable to get service: %v", err) - } else { - t.Logf("GetService: %++v", bucketList) - } -} - -func TestDelBucket(t *testing.T) { - - b := client.Bucket(TestBucket) - err := b.DelBucket() - if err != nil { - t.Errorf("Failed for DelBucket: %v", err) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go deleted file mode 100644 index 7c0d2549..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package oss_test - -import ( - "github.com/denverdino/aliyungo/oss" -) - -//Modify with your Access Key Id and Access Key Secret -const ( - TestAccessKeyId = "MY_ACCESS_KEY_ID" - TestAccessKeySecret = "MY_ACCESS_KEY_SECRET" - TestIAmRich = false - TestRegion = oss.Beijing - TestBucket = "denverdino" -) diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go deleted file
mode 100644 index ebdb0477..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go +++ /dev/null @@ -1,23 +0,0 @@ -package oss - -import ( - "github.com/denverdino/aliyungo/util" -) - -var originalStrategy = attempts - -func SetAttemptStrategy(s *util.AttemptStrategy) { - if s == nil { - attempts = originalStrategy - } else { - attempts = *s - } -} - -func SetListPartsMax(n int) { - listPartsMax = n -} - -func SetListMultiMax(n int) { - listMultiMax = n -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go deleted file mode 100644 index 5b6491ec..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go +++ /dev/null @@ -1,464 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - //"log" - "net/http" - "net/url" - "sort" - "strconv" - "strings" -) - -// Multi represents an unfinished multipart upload. -// -// Multipart uploads allow sending big objects in smaller chunks. -// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. - -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. -var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - params := make(url.Values) - params.Set("uploads", "") - params.Set("max-uploads", strconv.FormatInt(int64(listMultiMax), 10)) - params.Set("prefix", prefix) - params.Set("delimiter", delim) - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: b.Name, - params: params, - } - var resp listMultiResp - err := b.Client.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, nil, err - } - for i := range resp.Upload { - multi := &resp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, resp.CommonPrefixes...) - if !resp.IsTruncated { - return multis, prefixes, nil - } - params.Set("key-marker", resp.NextKeyMarker) - params.Set("upload-id-marker", resp.NextUploadIdMarker) - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. 
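A sketch of enumerating unfinished multipart uploads with the ListMulti helper above, so that abandoned uploads can be resumed or aborted; the bucket and credentials are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	client := oss.NewOSSClient(oss.Hangzhou, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true) // hypothetical credentials
	b := client.Bucket("my-bucket")

	multis, prefixes, err := b.ListMulti("", "/")
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range multis {
		fmt.Println("unfinished upload:", m.Key, m.UploadId)
	}
	fmt.Println("common prefixes:", prefixes)
}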
-func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm, options) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { - headers := make(http.Header) - headers.Set("Content-Length", "0") - headers.Set("Content-Type", contType) - headers.Set("x-oss-acl", string(perm)) - - options.addHeaders(headers) - params := make(url.Values) - params.Set("uploads", "") - req := &request{ - method: "POST", - bucket: b.Name, - path: key, - headers: headers, - params: params, - } - var err error - var resp struct { - UploadId string `xml:"UploadId"` - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.Client.query(req, &resp) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil -} - -func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { - // TODO source format a /BUCKET/PATH/TO/OBJECT - // TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) - - headers := make(http.Header) - headers.Set("x-oss-copy-source", source) - - options.addHeaders(headers) - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("partNumber", strconv.FormatInt(int64(n), 10)) - - sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/")) - //log.Println("source: ", source) - //log.Println("sourceBucket: ", sourceBucket.Name) - //log.Println("HEAD: ", strings.strings.SplitAfterN(source, "/", 3)[2]) - // TODO SplitAfterN can be use in bucket name - sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 3)[2], nil) - if err != nil { - return nil, Part{}, err - } - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - } - resp := &CopyObjectResult{} - err = m.Bucket.Client.query(req, resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, Part{}, err - } - if resp.ETag == "" { - return nil, Part{}, errors.New("part upload succeeded with no ETag") - } - return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil - } - panic("unreachable") -} - -// PutPart sends part n of the multipart upload, reading all the content from r. -// Each part, except for the last one, must be at least 5MB in size. 
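A sketch of the basic part-by-part flow with the InitMulti helper above: initiate the upload, send each part (every part except the last must be at least 5MB), then assemble with Complete, which is defined further down; the key and data here are illustrative:

package main

import (
	"log"
	"strings"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	client := oss.NewOSSClient(oss.Hangzhou, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true) // hypothetical credentials
	b := client.Bucket("my-bucket")

	multi, err := b.InitMulti("big-object", "application/octet-stream", oss.Private, oss.Options{})
	if err != nil {
		log.Fatal(err)
	}
	// A single part may be shorter than 5MB, since it is also the last part.
	part, err := multi.PutPart(1, strings.NewReader("all the data"))
	if err != nil {
		log.Fatal(err)
	}
	if err := multi.Complete([]oss.Part{part}); err != nil {
		log.Fatal(err)
	}
}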
-// -func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(partSize, 10)) - headers.Set("Content-MD5", md5b64) - - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("partNumber", strconv.FormatInt(int64(n), 10)) - - for attempt := attempts.Start(); attempt.Next(); { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - payload: r, - } - err = m.Bucket.Client.prepare(req) - if err != nil { - return Part{}, err - } - resp, err := m.Bucket.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return Part{}, err - } - etag := resp.Header.Get("ETag") - if etag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - return Part{n, etag, partSize}, nil - } - panic("unreachable") -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's the default. Here just for testing. -var listPartsMax = 1000 - -// ListParts for backcompatability. See the documentation for ListPartsFull -func (m *Multi) ListParts() ([]Part, error) { - return m.ListPartsFull(0, listPartsMax) -} - -// ListPartsFull returns the list of previously uploaded parts in m, -// ordered by part number (Only parts with higher part numbers than -// partNumberMarker will be listed). Only up to maxParts parts will be -// returned. -// -func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { - if maxParts > listPartsMax { - maxParts = listPartsMax - } - - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("max-parts", strconv.FormatInt(int64(maxParts), 10)) - params.Set("part-number-marker", strconv.FormatInt(int64(partNumberMarker), 10)) - - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - var resp listPartsResp - err := m.Bucket.Client.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - parts = append(parts, resp.Part...) - if !resp.IsTruncated { - sort.Sort(parts) - return parts, nil - } - params.Set("part-number-marker", resp.NextPartNumberMarker) - attempt = attempts.Start() // Last request worked. 
- } - panic("unreachable") -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. - var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. -// -func (m *Multi) Complete(parts []Part) error { - params := make(url.Values) - params.Set("uploadId", m.UploadId) - - c := completeUpload{} - for _, p := range parts { - c.Parts = append(c.Parts, completePart{p.N, p.ETag}) - } - sort.Sort(c.Parts) - data, err := xml.Marshal(&c) - if err != nil { - return err - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "POST", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - payload: bytes.NewReader(data), - } - err := m.Bucket.Client.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} - -// Abort deletes an unifinished multipart upload and any previously -// uploaded parts for it. -// -// After a multipart upload is aborted, no additional parts can be -// uploaded using it. However, if any part uploads are currently in -// progress, those part uploads might or might not succeed. As a result, -// it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. 
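A sketch of PutAll above driving the same machinery from a seekable file; *os.File satisfies ReaderAtSeeker, and the file path is hypothetical:

package main

import (
	"log"
	"os"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	client := oss.NewOSSClient(oss.Hangzhou, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true) // hypothetical credentials
	b := client.Bucket("my-bucket")

	f, err := os.Open("/tmp/backup.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	multi, err := b.InitMulti("backup.tar", "application/x-tar", oss.Private, oss.Options{})
	if err != nil {
		log.Fatal(err)
	}
	parts, err := multi.PutAll(f, 5*1024*1024) // 5MB parts, the documented minimum
	if err != nil {
		log.Fatal(err)
	}
	if err := multi.Complete(parts); err != nil {
		log.Fatal(err)
	}
}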
-// -// NOTE: If the described scenario happens to you, please report back to -// the goamz authors with details. In the future such retrying should be -// handled internally, but it's not clear what happens precisely (Is an -// error returned? Is the issue completely undetectable?). -// -func (m *Multi) Abort() error { - params := make(url.Values) - params.Set("uploadId", m.UploadId) - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - err := m.Bucket.Client.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go deleted file mode 100644 index 6ecd63be..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package oss_test - -import ( - //"encoding/xml" - "github.com/denverdino/aliyungo/oss" - "testing" - //"io" - //"io/ioutil" - "strings" -) - -func TestCreateBucketMulti(t *testing.T) { - TestCreateBucket(t) -} - -func TestInitMulti(t *testing.T) { - b := client.Bucket(TestBucket) - - metadata := make(map[string][]string) - metadata["key1"] = []string{"value1"} - metadata["key2"] = []string{"value2"} - options := oss.Options{ - ServerSideEncryption: true, - Meta: metadata, - ContentEncoding: "text/utf8", - CacheControl: "no-cache", - ContentMD5: "0000000000000000", - } - - multi, err := b.InitMulti("multi", "text/plain", oss.Private, options) - if err != nil { - t.Errorf("Failed for InitMulti: %v", err) - } else { - t.Logf("InitMulti result: %++v", multi) - } -} - -func TestMultiReturnOld(t *testing.T) { - - b := client.Bucket(TestBucket) - - multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) - if err != nil { - t.Errorf("Failed for Multi: %v", err) - } else { - t.Logf("Multi result: %++v", multi) - } - -} - -func TestPutPart(t *testing.T) { - - b := client.Bucket(TestBucket) - - multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) - if err != nil { - t.Fatalf("Failed for Multi: %v", err) - } - - part, err := multi.PutPart(1, strings.NewReader("")) - if err != nil { - t.Errorf("Failed for PutPart: %v", err) - } else { - t.Logf("PutPart result: %++v", part) - } - -} -func TestPutPartCopy(t *testing.T) { - - TestPutObject(t) - - b := client.Bucket(TestBucket) - - multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) - if err != nil { - t.Fatalf("Failed for Multi: %v", err) - } - - res, part, err := multi.PutPartCopy(2, oss.CopyOptions{}, b.Path("name")) - if err != nil { - t.Errorf("Failed for PutPartCopy: %v", err) - } else { - t.Logf("PutPartCopy result: %++v %++v", part, res) - } - TestDelObject(t) -} - -func TestListParts(t *testing.T) { - - b := client.Bucket(TestBucket) - - multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) - if err != nil { - t.Fatalf("Failed for Multi: %v", err) - } - - parts, err := multi.ListParts() - if err != nil { - t.Errorf("Failed for ListParts: %v", err) - } else { - t.Logf("ListParts result: %++v", parts) - } -} -func TestListMulti(t *testing.T) { - - b := client.Bucket(TestBucket) - - multis, prefixes, err := b.ListMulti("", "/") - if err != nil { - t.Errorf("Failed for 
ListMulti: %v", err) - } else { - t.Logf("ListMulti result : %++v %++v", multis, prefixes) - } -} -func TestMultiAbort(t *testing.T) { - - b := client.Bucket(TestBucket) - - multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) - if err != nil { - t.Fatalf("Failed for Multi: %v", err) - } - - err = multi.Abort() - if err != nil { - t.Errorf("Failed for Abort: %v", err) - } - -} - -func TestPutAll(t *testing.T) { - TestInitMulti(t) - // Don't retry the NoSuchUpload error. - b := client.Bucket(TestBucket) - - multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) - if err != nil { - t.Fatalf("Failed for Multi: %v", err) - } - - // Must send at least one part, so that completing it will work. - parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5) - if err != nil { - t.Errorf("Failed for PutAll: %v", err) - } else { - t.Logf("PutAll result: %++v", parts) - } - // // Must send at least one part, so that completing it will work. - // err = multi.Complete(parts) - // if err != nil { - // t.Errorf("Failed for Complete: %v", err) - // } - err = multi.Abort() - if err != nil { - t.Errorf("Failed for Abort: %v", err) - } -} - -func TestCleanUp(t *testing.T) { - TestDelBucket(t) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go deleted file mode 100644 index 2bba7382..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go +++ /dev/null @@ -1,53 +0,0 @@ -package oss - -import ( - "fmt" -) - -// Region represents OSS region -type Region string - -// Constants of region definition -const ( - Hangzhou = Region("oss-cn-hangzhou") - Qingdao = Region("oss-cn-qingdao") - Beijing = Region("oss-cn-beijing") - Hongkong = Region("oss-cn-hongkong") - Shenzhen = Region("oss-cn-shenzhen") - USWest1 = Region("oss-us-west-1") - DefaultRegion = Hangzhou -) - -// GetEndpoint returns endpoint of region -func (r Region) GetEndpoint(internal bool, bucket string, secure bool) string { - if internal { - return r.GetInternalEndpoint(bucket, secure) - } - return r.GetInternetEndpoint(bucket, secure) -} - -func getProtocol(secure bool) string { - protocol := "http" - if secure { - protocol = "https" - } - return protocol -} - -// GetInternetEndpoint returns internet endpoint of region -func (r Region) GetInternetEndpoint(bucket string, secure bool) string { - protocol := getProtocol(secure) - if bucket == "" { - return fmt.Sprintf("%s://oss.aliyuncs.com", protocol) - } - return fmt.Sprintf("%s://%s.%s.aliyuncs.com", protocol, bucket, string(r)) -} - -// GetInternalEndpoint returns internal endpoint of region -func (r Region) GetInternalEndpoint(bucket string, secure bool) string { - protocol := getProtocol(secure) - if bucket == "" { - return fmt.Sprintf("%s://oss-internal.aliyuncs.com", protocol) - } - return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r)) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go deleted file mode 100644 index a261644a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go +++ /dev/null @@ -1,105 +0,0 @@ -package oss - -import ( - 
"github.com/denverdino/aliyungo/util" - //"log" - "net/http" - "net/url" - "sort" - "strings" -) - -const HeaderOSSPrefix = "x-oss-" - -var ossParamsToSign = map[string]bool{ - "acl": true, - "delete": true, - "location": true, - "logging": true, - "notification": true, - "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, -} - -func (client *Client) signRequest(request *request) { - query := request.params - - urlSignature := query.Get("OSSAccessKeyId") != "" - - headers := request.headers - contentMd5 := headers.Get("Content-Md5") - contentType := headers.Get("Content-Type") - date := "" - if urlSignature { - date = query.Get("Expires") - } else { - date = headers.Get("Date") - } - - resource := request.path - if request.bucket != "" { - resource = "/" + request.bucket + request.path - } - params := make(url.Values) - for k, v := range query { - if ossParamsToSign[k] { - params[k] = v - } - } - - if len(params) > 0 { - resource = resource + "?" + util.Encode(params) - } - - canonicalizedResource := resource - - _, canonicalizedHeader := canonicalizeHeader(headers) - - stringToSign := request.method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedHeader + canonicalizedResource - - //log.Println("stringToSign: ", stringToSign) - signature := util.CreateSignature(stringToSign, client.AccessKeySecret) - - if query.Get("OSSAccessKeyId") != "" { - query.Set("Signature", signature) - } else { - headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature) - } -} - -//Have to break the abstraction to append keys with lower case. -func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result string) { - var canonicalizedHeaders []string - newHeaders = http.Header{} - - for k, v := range headers { - if lower := strings.ToLower(k); strings.HasPrefix(lower, HeaderOSSPrefix) { - newHeaders[lower] = v - canonicalizedHeaders = append(canonicalizedHeaders, lower) - } else { - newHeaders[k] = v - } - } - - sort.Strings(canonicalizedHeaders) - - var canonicalizedHeader string - - for _, k := range canonicalizedHeaders { - canonicalizedHeader += k + ":" + headers.Get(k) + "\n" - } - return newHeaders, canonicalizedHeader -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go deleted file mode 100644 index 2d07f03a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go +++ /dev/null @@ -1,76 +0,0 @@ -package util - -import ( - "time" -) - -// AttemptStrategy is reused from the goamz package - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other packages. -type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. 
- Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. -func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go deleted file mode 100644 index 50e9be7a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package util - -import ( - "testing" - "time" -) - -func TestAttemptTiming(t *testing.T) { - testAttempt := AttemptStrategy{ - Total: 0.25e9, - Delay: 0.1e9, - } - want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9} - got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing - t0 := time.Now() - for a := testAttempt.Start(); a.Next(); { - got = append(got, time.Now().Sub(t0)) - } - got = append(got, time.Now().Sub(t0)) - if len(got) != len(want) { - t.Fatalf("Failed!") - } - const margin = 0.01e9 - for i, got := range want { - lo := want[i] - margin - hi := want[i] + margin - if got < lo || got > hi { - t.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds()) - } - } -} - -func TestAttemptNextHasNext(t *testing.T) { - a := AttemptStrategy{}.Start() - if !a.Next() { - t.Fatalf("Failed!") - } - if a.Next() { - t.Fatalf("Failed!") - } - - a = AttemptStrategy{}.Start() - if !a.Next() { - t.Fatalf("Failed!") - } - if a.HasNext() { - t.Fatalf("Failed!") - } - if a.Next() { - t.Fatalf("Failed!") - } - a = AttemptStrategy{Total: 2e8}.Start() - - if !a.Next() { - t.Fatalf("Failed!") - } - if !a.HasNext() { - t.Fatalf("Failed!") - } - time.Sleep(2e8) - - if !a.HasNext() { - t.Fatalf("Failed!") - } - if !a.Next() { - t.Fatalf("Failed!") - } - if a.Next() { - t.Fatalf("Failed!") - } - - a = AttemptStrategy{Total: 1e8, Min: 2}.Start() - time.Sleep(1e8) - - if !a.Next() { - t.Fatalf("Failed!") - } - if !a.HasNext() { - t.Fatalf("Failed!") - } - if !a.Next() { - t.Fatalf("Failed!") - } - if a.HasNext() { - t.Fatalf("Failed!") - } - if a.Next() { - t.Fatalf("Failed!") - } -} diff --git 
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go deleted file mode 100644 index 56b900af..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go +++ /dev/null @@ -1,123 +0,0 @@ -package util - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strconv" - "time" -) - -//ConvertToQueryValues converts the struct to url.Values -func ConvertToQueryValues(ifc interface{}) url.Values { - values := url.Values{} - SetQueryValues(ifc, &values) - return values -} - -//SetQueryValues sets the struct to existing url.Values following ECS encoding rules -func SetQueryValues(ifc interface{}, values *url.Values) { - setQueryValues(ifc, values, "") -} - -func setQueryValues(i interface{}, values *url.Values, prefix string) { - elem := reflect.ValueOf(i) - if elem.Kind() == reflect.Ptr { - elem = elem.Elem() - } - elemType := elem.Type() - for i := 0; i < elem.NumField(); i++ { - fieldName := elemType.Field(i).Name - field := elem.Field(i) - // TODO Use Tag for validation - // tag := typ.Field(i).Tag.Get("tagname") - kind := field.Kind() - if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { - continue - } - if kind == reflect.Ptr { - field = field.Elem() - kind = field.Kind() - } - var value string - //switch field.Interface().(type) { - switch kind { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - i := field.Int() - if i != 0 { - value = strconv.FormatInt(i, 10) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - i := field.Uint() - if i != 0 { - value = strconv.FormatUint(i, 10) - } - case reflect.Float32: - value = strconv.FormatFloat(field.Float(), 'f', 4, 32) - case reflect.Float64: - value = strconv.FormatFloat(field.Float(), 'f', 4, 64) - case reflect.Bool: - value = strconv.FormatBool(field.Bool()) - case reflect.String: - value = field.String() - case reflect.Slice: - switch field.Type().Elem().Kind() { - case reflect.Uint8: - value = string(field.Bytes()) - case reflect.String: - l := field.Len() - if l > 0 { - strArray := make([]string, l) - for i := 0; i < l; i++ { - strArray[i] = field.Index(i).String() - } - bytes, err := json.Marshal(strArray) - if err == nil { - value = string(bytes) - } else { - log.Printf("Failed to convert JSON: %v", err) - } - } - default: - l := field.Len() - for j := 0; j < l; j++ { - prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) - ifc := field.Index(j).Interface() - log.Printf("%s : %v", prefixName, ifc) - if ifc != nil { - setQueryValues(ifc, values, prefixName) - } - } - continue - } - - default: - switch field.Interface().(type) { - case ISO6801Time: - t := field.Interface().(ISO6801Time) - value = t.String() - case time.Time: - t := field.Interface().(time.Time) - value = GetISO8601TimeStamp(t) - default: - ifc := field.Interface() - if ifc != nil { - SetQueryValues(ifc, values) - continue - } - } - } - if value != "" { - name := elemType.Field(i).Tag.Get("ArgName") - if name == "" { - name = fieldName - } - if prefix != "" { - name = prefix + name - } - values.Set(name, value) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go deleted file mode 100644 index 049cd86b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package util - -import ( - "testing" - "time" -) - -type TestString string - -type SubStruct struct { - A string - B int -} - -type TestStruct struct { - Format string - Version string - AccessKeyId string - Timestamp time.Time - Empty string - IntValue int `ArgName:"int-value"` - BoolPtr *bool `ArgName:"bool-ptr"` - IntPtr *int `ArgName:"int-ptr"` - StringArray []string `ArgName:"str-array"` - StructArray []SubStruct - test TestString - tests []TestString -} - -func TestConvertToQueryValues(t *testing.T) { - boolValue := true - request := TestStruct{ - Format: "JSON", - Version: "1.0", - Timestamp: time.Date(2015, time.Month(5), 26, 1, 2, 3, 4, time.UTC), - IntValue: 10, - BoolPtr: &boolValue, - StringArray: []string{"abc", "xyz"}, - StructArray: []SubStruct{ - SubStruct{A: "a", B: 1}, - SubStruct{A: "x", B: 2}, - }, - test: TestString("test"), - tests: []TestString{TestString("test1"), TestString("test2")}, - } - result := ConvertToQueryValues(&request).Encode() - const expectedResult = "Format=JSON&StructArray.1.A=a&StructArray.1.B=1&StructArray.2.A=x&StructArray.2.B=2&Timestamp=2015-05-26T01%3A02%3A03Z&Version=1.0&bool-ptr=true&int-value=10&str-array=%5B%22abc%22%2C%22xyz%22%5D&test=test&tests=%5B%22test1%22%2C%22test2%22%5D" - if result != expectedResult { - t.Error("Incorrect encoding: ", result) - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go deleted file mode 100644 index 031b6175..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go +++ /dev/null @@ -1,62 +0,0 @@ -package util - -import ( - "fmt" - "time" -) - -// GetISO8601TimeStamp gets timestamp string in ISO8601 format -func GetISO8601TimeStamp(ts time.Time) string { - t := ts.UTC() - return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) -} - -const formatISO8601 = "2006-01-02T15:04:05Z" -const jsonFormatISO8601 = `"` + formatISO8601 + `"` - -// An ISO6801Time represents a time in ISO8601 format -type ISO6801Time time.Time - -// NewISO6801Time constructs a new ISO6801Time instance from an existing -// time.Time instance. This causes the nanosecond field to be set to -// 0, and its time zone set to a fixed zone with no offset from UTC -// (but it is *not* UTC itself).
-func NewISO6801Time(t time.Time) ISO6801Time { - return ISO6801Time(time.Date( - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - 0, - time.UTC, - )) -} - -// IsDefault checks if the time is default -func (it *ISO6801Time) IsDefault() bool { - return *it == ISO6801Time{} -} - -// MarshalJSON serializes the ISO6801Time into JSON string -func (it ISO6801Time) MarshalJSON() ([]byte, error) { - return []byte(time.Time(it).Format(jsonFormatISO8601)), nil -} - -// UnmarshalJSON deserializes the ISO6801Time from JSON string -func (it *ISO6801Time) UnmarshalJSON(data []byte) error { - if string(data) == "\"\"" { - return nil - } - t, err := time.ParseInLocation(jsonFormatISO8601, string(data), time.UTC) - if err == nil { - *it = ISO6801Time(t) - } - return err -} - -// String returns the time in ISO8601 format -func (it ISO6801Time) String() string { - return time.Time(it).Format(formatISO8601) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go deleted file mode 100644 index f2ba96a4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package util - -import ( - "encoding/json" - "testing" - "time" -) - -func TestISO8601Time(t *testing.T) { - now := NewISO6801Time(time.Now().UTC()) - - data, err := json.Marshal(now) - if err != nil { - t.Fatal(err) - } - - _, err = time.Parse(`"`+formatISO8601+`"`, string(data)) - if err != nil { - t.Fatal(err) - } - - var now2 ISO6801Time - err = json.Unmarshal(data, &now2) - if err != nil { - t.Fatal(err) - } - - if now != now2 { - t.Errorf("Time %s does not equal expected %s", now2, now) - } - - if now.String() != now2.String() { - t.Fatalf("String format for %s does not equal expected %s", now2, now) - } - - type TestTimeStruct struct { - A int - B *ISO6801Time - } - var testValue TestTimeStruct - err = json.Unmarshal([]byte("{\"A\": 1, \"B\":\"\"}"), &testValue) - if err != nil { - t.Fatal(err) - } - t.Logf("%v", testValue) - if !testValue.B.IsDefault() { - t.Fatal("Invalid Unmarshal result for ISO6801Time from empty value") - } - t.Logf("ISO6801Time String(): %s", now2.String()) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go deleted file mode 100644 index a00b27c1..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go +++ /dev/null @@ -1,40 +0,0 @@ -package util - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "net/url" - "strings" -) - -// CreateSignature creates a signature for a string following Aliyun rules -func CreateSignature(stringToSignature, accessKeySecret string) string { - // Crypto by HMAC-SHA1 - hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret)) - hmacSha1.Write([]byte(stringToSignature)) - sign := hmacSha1.Sum(nil) - - // Encode to Base64 - base64Sign := base64.StdEncoding.EncodeToString(sign) - - return base64Sign -} - -func percentReplace(str string) string { - str = strings.Replace(str, "+", "%20", -1) - str = strings.Replace(str, "*", "%2A", -1) - str = strings.Replace(str, "%7E", "~", -1) - - return str -} - -// CreateSignatureForRequest
creates a signature for query string values -func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string { - - canonicalizedQueryString := percentReplace(values.Encode()) - - stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString) - - return CreateSignature(stringToSign, accessKeySecret) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go deleted file mode 100644 index e5c22cca..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package util - -import ( - "testing" -) - -func TestCreateSignature(t *testing.T) { - - str := "GET&%2F&AccessKeyId%3Dtestid%26Action%3DDescribeRegions%26Format%3DXML%26RegionId%3Dregion1%26SignatureMethod%3DHMAC-SHA1%26SignatureNonce%3DNwDAxvLU6tFE0DVb%26SignatureVersion%3D1.0%26TimeStamp%3D2012-12-26T10%253A33%253A56Z%26Version%3D2014-05-26" - - signature := CreateSignature(str, "testsecret") - - t.Log(signature) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go deleted file mode 100644 index daa6bb02..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go +++ /dev/null @@ -1,134 +0,0 @@ -package util - -import ( - "bytes" - srand "crypto/rand" - "encoding/binary" - "math/rand" - "net/http" - "net/url" - "sort" - "strconv" - "time" -) - -// CreateRandomString creates a random string -func CreateRandomString() string { - - rand.Seed(time.Now().UnixNano()) - randInt := rand.Int63() - randStr := strconv.FormatInt(randInt, 36) - - return randStr -} - -// Encode encodes the values into ``URL encoded'' form -// ("acl&bar=baz&foo=quux") sorted by key.
-func Encode(v url.Values) string { - if v == nil { - return "" - } - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := url.QueryEscape(k) - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - if v != "" { - buf.WriteString("=") - buf.WriteString(url.QueryEscape(v)) - } - } - } - return buf.String() -} - -func GetGMTime() string { - return time.Now().UTC().Format(http.TimeFormat) -} - -// - -func randUint32() uint32 { - return randUint32Slice(1)[0] -} - -func randUint32Slice(c int) []uint32 { - b := make([]byte, c*4) - - _, err := srand.Read(b) - - if err != nil { - // fall back to insecure rand - rand.Seed(time.Now().UnixNano()) - for i := range b { - b[i] = byte(rand.Int()) - } - } - - n := make([]uint32, c) - - for i := range n { - n[i] = binary.BigEndian.Uint32(b[i*4 : i*4+4]) - } - - return n -} - -func toByte(n uint32, st, ed byte) byte { - return byte(n%uint32(ed-st+1) + uint32(st)) -} - -func toDigit(n uint32) byte { - return toByte(n, '0', '9') -} - -func toLowerLetter(n uint32) byte { - return toByte(n, 'a', 'z') -} - -func toUpperLetter(n uint32) byte { - return toByte(n, 'A', 'Z') -} - -type convFunc func(uint32) byte - -var convFuncs = []convFunc{toDigit, toLowerLetter, toUpperLetter} - -// tools for generating a random ECS instance password -// from 8 to 30 chars; MUST contain a digit, a lower case letter and an upper case letter -// http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance -func GenerateRandomECSPassword() string { - - // [8, 30] - l := int(randUint32()%23 + 8) - - n := randUint32Slice(l) - - b := make([]byte, l) - - b[0] = toDigit(n[0]) - b[1] = toLowerLetter(n[1]) - b[2] = toUpperLetter(n[2]) - - for i := 3; i < l; i++ { - b[i] = convFuncs[n[i]%3](n[i]) - } - - s := make([]byte, l) - perm := rand.Perm(l) - for i, v := range perm { - s[v] = b[i] - } - - return string(s) - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go deleted file mode 100644 index 87d2a0b8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package util - -import ( - "testing" -) - -func TestGenerateRandomECSPassword(t *testing.T) { - for i := 0; i < 10; i++ { - s := GenerateRandomECSPassword() - - if len(s) < 8 || len(s) > 30 { - t.Errorf("Generated ECS password [%v]: bad len", s) - } - - hasDigit := false - hasLower := false - hasUpper := false - - for j := range s { - - switch { - case '0' <= s[j] && s[j] <= '9': - hasDigit = true - case 'a' <= s[j] && s[j] <= 'z': - hasLower = true - case 'A' <= s[j] && s[j] <= 'Z': - hasUpper = true - } - } - - if !hasDigit { - t.Errorf("Generated ECS password [%v]: no digit", s) - } - - if !hasLower { - t.Errorf("Generated ECS password [%v]: no lower letter", s) - } - - if !hasUpper { - t.Errorf("Generated ECS password [%v]: no upper letter", s) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go deleted file mode 100644 index 06a42825..00000000 ---
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go +++ /dev/null @@ -1,20 +0,0 @@ -package tarsum - -// This interface extends TarSum by adding the Remove method. In general -// there was concern about adding this method to TarSum itself so instead -// it is being added just to "BuilderContext" which will then only be used -// during the .dockerignore file processing - see builder/evaluator.go -type BuilderContext interface { - TarSum - Remove(string) -} - -func (bc *tarSum) Remove(filename string) { - for i, fis := range bc.sums { - if fis.Name() == filename { - bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) - // Note, we don't just return because there could be - // more than one with this name - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go deleted file mode 100644 index 719f7289..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package tarsum - -import ( - "io" - "io/ioutil" - "os" - "testing" -) - -// Try to remove a tarsum (in the BuilderContext) that does not exist; it won't change a thing -func TestTarSumRemoveNonExistent(t *testing.T) { - filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" - reader, err := os.Open(filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to read from %s: %s", filename, err) - } - - expected := len(ts.GetSums()) - - ts.(BuilderContext).Remove("") - ts.(BuilderContext).Remove("Anything") - - if len(ts.GetSums()) != expected { - t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums())) - } -} - -// Remove a tarsum (in the BuilderContext) -func TestTarSumRemove(t *testing.T) { - filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" - reader, err := os.Open(filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to read from %s: %s", filename, err) - } - - expected := len(ts.GetSums()) - 1 - - ts.(BuilderContext).Remove("etc/sudoers") - - if len(ts.GetSums()) != expected { - t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums())) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go deleted file mode 100644 index 32e5b378..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go +++ /dev/null @@ -1,116 +0,0 @@ -package tarsum - -import "sort" - -// This info will be accessed through an interface so the actual name and sum cannot be meddled with -type FileInfoSumInterface interface { - // File name - Name() string - // Checksum of this particular file and its headers - Sum() string - // Position of file
in the tar - Pos() int64 -} - -type fileInfoSum struct { - name string - sum string - pos int64 -} - -func (fis fileInfoSum) Name() string { - return fis.name -} -func (fis fileInfoSum) Sum() string { - return fis.sum -} -func (fis fileInfoSum) Pos() int64 { - return fis.pos -} - -type FileInfoSums []FileInfoSumInterface - -// GetFile returns the first FileInfoSumInterface with a matching name -func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { - for i := range fis { - if fis[i].Name() == name { - return fis[i] - } - } - return nil -} - -// GetAllFile returns a FileInfoSums with all matching names -func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { - f := FileInfoSums{} - for i := range fis { - if fis[i].Name() == name { - f = append(f, fis[i]) - } - } - return f -} - -func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { - seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map. - for i := range fis { - f := fis[i] - if _, ok := seen[f.Name()]; ok { - dups = append(dups, f) - } else { - seen[f.Name()] = 0 - } - } - return dups -} - -func (fis FileInfoSums) Len() int { return len(fis) } -func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } - -func (fis FileInfoSums) SortByPos() { - sort.Sort(byPos{fis}) -} - -func (fis FileInfoSums) SortByNames() { - sort.Sort(byName{fis}) -} - -func (fis FileInfoSums) SortBySums() { - dups := fis.GetDuplicatePaths() - if len(dups) > 0 { - sort.Sort(bySum{fis, dups}) - } else { - sort.Sort(bySum{fis, nil}) - } -} - -// byName is a sort.Sort helper for sorting by file names. -// If names are the same, order them by their appearance in the tar archive -type byName struct{ FileInfoSums } - -func (bn byName) Less(i, j int) bool { - if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { - return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() - } - return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() -} - -// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive -type bySum struct { - FileInfoSums - dups FileInfoSums -} - -func (bs bySum) Less(i, j int) bool { - if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { - return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() - } - return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() -} - -// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order -type byPos struct{ FileInfoSums } - -func (bp byPos) Less(i, j int) bool { - return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go deleted file mode 100644 index bb700d8b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package tarsum - -import "testing" - -func newFileInfoSums() FileInfoSums { - return FileInfoSums{ - fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, - fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, - fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, - fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, - fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, - fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, - } -} - -func
TestSortFileInfoSums(t *testing.T) { - dups := newFileInfoSums().GetAllFile("dup1") - if len(dups) != 2 { - t.Errorf("expected length 2, got %d", len(dups)) - } - dups.SortByNames() - if dups[0].Pos() != 4 { - t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) - } - - fis := newFileInfoSums() - expected := "0abcdef1234567890" - fis.SortBySums() - got := fis[0].Sum() - if got != expected { - t.Errorf("Expected %q, got %q", expected, got) - } - - fis = newFileInfoSums() - expected = "dup1" - fis.SortByNames() - gotFis := fis[0] - if gotFis.Name() != expected { - t.Errorf("Expected %q, got %q", expected, gotFis.Name()) - } - // since a duplicate is first, ensure it is ordered first by position too - if gotFis.Pos() != 4 { - t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) - } - - fis = newFileInfoSums() - fis.SortByPos() - if fis[0].Pos() != 0 { - t.Errorf("sorted fileInfoSums by Pos should order them by position.") - } - - fis = newFileInfoSums() - expected = "deadbeef1" - gotFileInfoSum := fis.GetFile("dup1") - if gotFileInfoSum.Sum() != expected { - t.Errorf("Expected %q, got %q", expected, gotFileInfoSum.Sum()) - } - if fis.GetFile("noPresent") != nil { - t.Errorf("Should have returned nil if name not found.") - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go deleted file mode 100644 index a778bb0b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go +++ /dev/null @@ -1,276 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "hash" - "io" - "strings" -) - -const ( - buf8K = 8 * 1024 - buf16K = 16 * 1024 - buf32K = 32 * 1024 -) - -// NewTarSum creates a new interface for calculating a fixed time checksum of a -// tar archive. -// -// This is used for calculating checksums of layers of an image, in some cases -// including the byte payload of the image's json metadata as well, and for -// calculating the checksums for buildcache. -func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - return NewTarSumHash(r, dc, v, DefaultTHash) -} - -// Create a new TarSum, providing a THash to use rather than the DefaultTHash -func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} - err = ts.initTarSum() - return ts, err -} - -// Create a new TarSum using the provided TarSum version+hash label.
-func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { - parts := strings.SplitN(label, "+", 2) - if len(parts) != 2 { - return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") - } - - versionName, hashName := parts[0], parts[1] - - version, ok := tarSumVersionsByName[versionName] - if !ok { - return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) - } - - hashConfig, ok := standardHashConfigs[hashName] - if !ok { - return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) - } - - tHash := NewTHash(hashConfig.name, hashConfig.hash.New) - - return NewTarSumHash(r, disableCompression, version, tHash) -} - -// TarSum is the generic interface for calculating fixed time -// checksums of a tar archive -type TarSum interface { - io.Reader - GetSums() FileInfoSums - Sum([]byte) string - Version() Version - Hash() THash -} - -// tarSum struct is the structure for a Version0 checksum calculation -type tarSum struct { - io.Reader - tarR *tar.Reader - tarW *tar.Writer - writer writeCloseFlusher - bufTar *bytes.Buffer - bufWriter *bytes.Buffer - bufData []byte - h hash.Hash - tHash THash - sums FileInfoSums - fileCounter int64 - currentFile string - finished bool - first bool - DisableCompression bool // false by default. When false, the output is gzip compressed. - tarSumVersion Version // this field is not exported so it cannot be mutated during use - headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive -} - -func (ts tarSum) Hash() THash { - return ts.tHash -} - -func (ts tarSum) Version() Version { - return ts.tarSumVersion -} - -// A hash.Hash type generator and its name -type THash interface { - Hash() hash.Hash - Name() string -} - -// Convenience method for creating a THash -func NewTHash(name string, h func() hash.Hash) THash { - return simpleTHash{n: name, h: h} -} - -type tHashConfig struct { - name string - hash crypto.Hash -} - -var ( - // NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
- standardHashConfigs = map[string]tHashConfig{ - "sha256": {name: "sha256", hash: crypto.SHA256}, - "sha512": {name: "sha512", hash: crypto.SHA512}, - } -) - -// TarSum default is "sha256" -var DefaultTHash = NewTHash("sha256", sha256.New) - -type simpleTHash struct { - n string - h func() hash.Hash -} - -func (sth simpleTHash) Name() string { return sth.n } -func (sth simpleTHash) Hash() hash.Hash { return sth.h() } - -func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.headerSelector.selectHeaders(h) { - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *tarSum) initTarSum() error { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufWriter = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.writer = gzip.NewWriter(ts.bufWriter) - } else { - ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} - } - if ts.tHash == nil { - ts.tHash = DefaultTHash - } - ts.h = ts.tHash.Hash() - ts.h.Reset() - ts.first = true - ts.sums = FileInfoSums{} - return nil -} - -func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.finished { - return ts.bufWriter.Read(buf) - } - if len(ts.bufData) < len(buf) { - switch { - case len(buf) <= buf8K: - ts.bufData = make([]byte, buf8K) - case len(buf) <= buf16K: - ts.bufData = make([]byte, buf16K) - case len(buf) <= buf32K: - ts.bufData = make([]byte, buf32K) - default: - ts.bufData = make([]byte, len(buf)) - } - } - buf2 := ts.bufData[:len(buf)] - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) - ts.fileCounter++ - ts.h.Reset() - } else { - ts.first = false - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.tarW.Close(); err != nil { - return 0, err - } - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - if err := ts.writer.Close(); err != nil { - return 0, err - } - ts.finished = true - return n, nil - } - return n, err - } - ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) - } - return n, err - } - - // Filling the hash buffer - if _, err = ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writer - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - - // Filling the output writer - if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) -} - -func (ts *tarSum) Sum(extra []byte) string { - ts.sums.SortBySums() - h := ts.tHash.Hash() - if extra != nil { - h.Write(extra) - } - for _, fis := range ts.sums { - h.Write([]byte(fis.Sum())) - } - checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) - return checksum -} - -func (ts *tarSum) GetSums() FileInfoSums { - return ts.sums -} diff --git
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md deleted file mode 100644 index 7a6f8edc..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md +++ /dev/null @@ -1,225 +0,0 @@ -page_title: TarSum checksum specification -page_description: Documentation for algorithms used in the TarSum checksum calculation -page_keywords: docker, checksum, validation, tarsum - -# TarSum Checksum Specification - -## Abstract - -This document describes the algorithms used in performing the TarSum checksum -calculation on filesystem layers, the need for this method over existing -methods, and the versioning of this calculation. - - -## Introduction - -The transportation of filesystems, regarding Docker, is done with tar(1) -archives. There are a variety of tar serialization formats [2], and a key -concern here is ensuring a repeatable checksum given a set of inputs from a -generic tar archive. Types of transportation include distribution to and from a -registry endpoint, saving and loading through commands or Docker daemon APIs, -transferring the build context from client to Docker daemon, and committing the -filesystem of a container to become an image. - -As tar archives are used for transit, but not preserved in many situations, the -focus of the algorithm is to ensure the integrity of the preserved filesystem, -while maintaining deterministic accountability. The algorithm neither -constrains the ordering or manipulation of the files during the creation or -unpacking of the archive, nor includes additional metadata about the file -system attributes. - -## Intended Audience - -This document outlines the methods used for consistent checksum calculation -for filesystems transported via tar archives. - -Auditing these methodologies is an open and iterative process. This document -should accommodate the review of source code. Ultimately, this document should -be the starting point of further refinements to the algorithm and its future -versions. - -## Concept - -The checksum mechanism must ensure the integrity and assurance of the -filesystem payload. - -## Checksum Algorithm Profile - -A checksum mechanism must define the following operations and attributes: - -* Associated hashing cipher - used to checksum each file payload and attribute - information. -* Checksum list - each file of the filesystem archive has its checksum - calculated from the payload and attributes of the file. The final checksum is - calculated from this list, with specific ordering. -* Version - as the algorithm adapts to requirements, there are behaviors of the - algorithm to manage by versioning. -* Archive being calculated - the tar archive having its checksum calculated - -## Elements of TarSum checksum - -The calculated sum output is a text string. The elements included in the output -of the calculated sum comprise the information needed for validation of the sum -(TarSum version and hashing cipher used) and the expected checksum in hexadecimal form.
- -There are two delimiters used: -* '+' separates TarSum version from hashing cipher -* ':' separates calculation mechanics from expected hash - -Example: - -``` - "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" - | | \ | - | | \ | - |_version_|_cipher__|__ | - | \ | - |_calculation_mechanics_|______________________expected_sum_______________________| -``` - -## Versioning - -Versioning was introduced [0] to accommodate differences in calculation needed, -and the ability to maintain backward compatibility. - -The general algorithm will be described further in the 'Calculation' section. - -### Version0 - -This is the initial version of TarSum. - -Its element in the TarSum checksum string is `tarsum`. - -### Version1 - -Its element in the TarSum checksum is `tarsum.v1`. - -The notable changes in this version: -* Exclusion of file `mtime` from the file information headers, in each file - checksum calculation -* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax - tar file info headers) keys and values in each file checksum calculation - -### VersionDev - -*Do not use unless validating refinements to the checksum algorithm* - -Its element in the TarSum checksum is `tarsum.dev`. - -This is a floating placeholder for a next version and grounds for testing -changes. The methods used for calculation are subject to change without notice, -and this version is for testing and not for production use. - -## Ciphers - -The official default and standard hashing cipher used in the calculation mechanic -is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4. - -Though the TarSum algorithm itself is not exclusively bound to the single -hashing cipher `sha256`, support for alternate hashing ciphers was later added -[1]. Use cases for alternate ciphers could include future-proofing TarSum -checksum format and using faster cipher hashes for tar filesystem checksums. - -## Calculation - -### Requirement - -As mentioned earlier, the calculation is such that it takes into consideration -the lifecycle of the tar archive, in that the tar archive is not an immutable, -permanent artifact. Otherwise options like relying on a known hashing cipher -checksum of the archive itself would be reliable enough. The tar archive of the -filesystem is used as a transportation medium for Docker images, and the -archive is discarded once its contents are extracted. Therefore, for consistent -validation, items such as the order of files in the tar archive and timestamps are -subject to change once an image is received. - -### Process - -The method is typically iterative due to reading tar info headers from the -archive stream, though this is not a strict requirement. - -#### Files - -Each file in the tar archive has its contents (headers and body) checksummed -individually using the designated associated hashing cipher. The ordered -headers of the file are written to the checksum calculation first, and then the -payload of the file body. - -The resulting checksum of the file is appended to the list of file sums. The -sum is encoded as a string of the hexadecimal digest. Additionally, the file -name and position in the archive are kept as a reference for special ordering.
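For illustration, the per-file step above can be sketched in a few lines of Go. This is an illustrative sketch only, not the vendored implementation: `hashFilePayload` and its `orderedHeaders` argument are assumed names, standing in for the version-specific header selection described under 'Headers' below.

```go
package tarsum

import (
	"crypto/sha256"
	"encoding/hex"
	"io"
)

// hashFilePayload (hypothetical helper) hashes the ordered "{.key}{.value}"
// header pairs first, then the file body, and returns the hex digest that
// would be appended to the list of file sums. sha256 is the default cipher.
func hashFilePayload(orderedHeaders [][2]string, body io.Reader) (string, error) {
	h := sha256.New()
	for _, kv := range orderedHeaders {
		if _, err := h.Write([]byte(kv[0] + kv[1])); err != nil {
			return "", err
		}
	}
	if _, err := io.Copy(h, body); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
```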
- -#### Headers - -The following headers are read, in this -order (with the corresponding representation of their values): -* 'name' - string -* 'mode' - string of the base10 integer -* 'uid' - string of the integer -* 'gid' - string of the integer -* 'size' - string of the integer -* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC -* 'typeflag' - string of the char -* 'linkname' - string -* 'uname' - string -* 'gname' - string -* 'devmajor' - string of the integer -* 'devminor' - string of the integer - -For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax -headers) are included after the above list. These xattr key/values are first -sorted by key. - -#### Header Format - -The ordered headers are written to the hash in the format of - - "{.key}{.value}" - -with no newline. - -#### Body - -After the ordered headers of the file have been added to the checksum for the -file, the body of the file is written to the hash. - -#### List of file sums - -The list of file sums is sorted by the string of the hexadecimal digest. - -If there are two files in the tar with matching paths, the order of occurrence -for that path is reflected for the sums of the corresponding file header and -body. - -#### Final Checksum - -Begin with a fresh or initial state of the associated hash cipher. If there is -additional payload to include in the TarSum calculation for the archive, it is -written first. Then each checksum from the ordered list of file sums is written -to the hash. - -The resulting digest is formatted per the Elements of TarSum checksum, -including the TarSum version, the associated hash cipher and the hexadecimal -encoded checksum digest. - -## Security Considerations - -The initial version of TarSum has undergone one update that could invalidate -handcrafted tar archives. The tar archive format supports appending of files -with same names as prior files in the archive. The latter file will clobber the -prior file of the same path. Due to this, the algorithm now accounts for files -with matching paths, and orders the list of file sums accordingly [3]. - -## Footnotes - -* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 -* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e -* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 -* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 - -## Acknowledgements - -Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the -TarSum calculation.
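Taken together, the exported API of the deleted package reduces to a construct-drain-sum sequence. The sketch below uses only identifiers defined in tarsum.go above (`NewTarSum`, `Version0`, `Sum`); the helper name and archive path are illustrative assumptions, not part of the package.

```go
package tarsum

import (
	"io"
	"io/ioutil"
	"os"
)

// computeTarSum is a hypothetical helper: the checksum is computed as the
// stream is read, so the TarSum must be drained before calling Sum.
func computeTarSum(path string) (string, error) {
	fh, err := os.Open(path) // e.g. "layer.tar" (placeholder)
	if err != nil {
		return "", err
	}
	defer fh.Close()

	ts, err := NewTarSum(fh, true, Version0) // dc=true: do not gzip the passthrough copy
	if err != nil {
		return "", err
	}
	// Reading drives the checksum calculation; the passthrough bytes are discarded.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		return "", err
	}
	return ts.Sum(nil), nil // e.g. "tarsum+sha256:<hex digest>"
}
```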
- diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go deleted file mode 100644 index 89626660..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go +++ /dev/null @@ -1,648 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -type testLayer struct { - filename string - options *sizedOptions - jsonfile string - gzip bool - tarsum string - version Version - hash THash -} - -var testLayers = []testLayer{ - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: Version0, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - gzip: true, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - // Tests existing version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: Version0, - tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, - { - // Tests next version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, - { - filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", - jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", - tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, - { - // this tar has two files with the same path - filename: "testdata/collision/collision-0.tar", - tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, - { - // this tar has the same two files (with the same path), but reversed order. 
ensuring it has a different hash than the one above - filename: "testdata/collision/collision-1.tar", - tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, - { - // this tar has a newer version of collision-0.tar, ensuring it has a different hash - filename: "testdata/collision/collision-2.tar", - tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, - { - // this tar has a newer version of collision-1.tar, ensuring it has a different hash - filename: "testdata/collision/collision-3.tar", - tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", - hash: md5THash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", - hash: sha1Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", - hash: sha224Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", - hash: sha384Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", - hash: sha512Hash, - }, -} - -type sizedOptions struct { - num int64 - size int64 - isRand bool - realFile bool -} - -// make a tar: -// * num is the number of files the tar should have -// * size is the bytes per file -// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) -// * realFile will write to a TempFile, instead of an in-memory buffer -func sizedTar(opts sizedOptions) io.Reader { - var ( - fh io.ReadWriter - err error - ) - if opts.realFile { - fh, err = ioutil.TempFile("", "tarsum") - if err != nil { - return nil - } - } else { - fh = bytes.NewBuffer([]byte{}) - } - tarW := tar.NewWriter(fh) - defer tarW.Close() - for i := int64(0); i < opts.num; i++ { - err := tarW.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("/testdata%d", i), - Mode: 0755, - Uid: 0, - Gid: 0, - Size: opts.size, - }) - if err != nil { - return nil - } - var rBuf []byte - if opts.isRand { - rBuf = make([]byte, 8) - _, err = rand.Read(rBuf) - if err != nil { - return nil - } - } else { - rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} - } - - for i := int64(0); i < opts.size/int64(8); i++ { - tarW.Write(rBuf) - } - } - return fh -} - -func emptyTarSum(gzip bool) (TarSum, error) { - reader, writer := io.Pipe() - tarWriter := tar.NewWriter(writer) - - // Immediately close tarWriter and write-end of the - // Pipe in a separate goroutine so we don't block.
- go func() { - tarWriter.Close() - writer.Close() - }() - - return NewTarSum(reader, !gzip, Version0) -} - -// Test errors on NewTarSumForLabel -func TestNewTarSumForLabelInvalid(t *testing.T) { - reader := strings.NewReader("") - - if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - - if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } -} - -func TestNewTarSumForLabel(t *testing.T) { - - layer := testLayers[0] - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - label := strings.Split(layer.tarsum, ":")[0] - ts, err := NewTarSumForLabel(reader, false, label) - if err != nil { - t.Fatal(err) - } - - // Make sure it actually worked by reading a little bit of it - nbByteToRead := 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %v bytes from %s: %s", nbByteToRead, layer.filename, err) - } -} - -// TestEmptyTar tests that tarsum does not fail to read an empty tar -// and correctly returns the hex digest of an empty hash. -func TestEmptyTar(t *testing.T) { - // Test without gzip. - ts, err := emptyTarSum(false) - if err != nil { - t.Fatal(err) - } - - zeroBlock := make([]byte, 1024) - buf := new(bytes.Buffer) - - n, err := io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { - t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) - } - - expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) - resultSum := ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test with gzip. - ts, err = emptyTarSum(true) - if err != nil { - t.Fatal(err) - } - buf.Reset() - - n, err = io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - bufgz := new(bytes.Buffer) - gz := gzip.NewWriter(bufgz) - n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) - gz.Close() - gzBytes := bufgz.Bytes() - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { - t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test without ever actually writing anything.
- if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { - t.Fatal(err) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } -} - -var ( - md5THash = NewTHash("md5", md5.New) - sha1Hash = NewTHash("sha1", sha1.New) - sha224Hash = NewTHash("sha224", sha256.New224) - sha384Hash = NewTHash("sha384", sha512.New384) - sha512Hash = NewTHash("sha512", sha512.New) -) - -// Test all the built-in read sizes: buf8K, buf16K, buf32K and more -func TestTarSumsReadSize(t *testing.T) { - // Test always on the same layer (that is big enough) - layer := testLayers[0] - - for i := 0; i < 5; i++ { - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, layer.version) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - nbByteToRead := (i + 1) * 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %v bytes from %s: %s", nbByteToRead, layer.filename, err) - continue - } - } -} - -func TestTarSums(t *testing.T) { - for _, layer := range testLayers { - var ( - fh io.Reader - err error - ) - if len(layer.filename) > 0 { - fh, err = os.Open(layer.filename) - if err != nil { - t.Errorf("failed to open %s: %s", layer.filename, err) - continue - } - } else if layer.options != nil { - fh = sizedTar(*layer.options) - } else { - // What else is there to test? - t.Errorf("what to do with %#v", layer) - continue - } - if file, ok := fh.(*os.File); ok { - defer file.Close() - } - - var ts TarSum - if layer.hash == nil { - // double negatives! - ts, err = NewTarSum(fh, !layer.gzip, layer.version) - } else { - ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) - } - if err != nil { - t.Errorf("%q :: %q", err, layer.filename) - continue - } - - // Read variable number of bytes to test dynamic buffer - dBuf := make([]byte, 1) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 1B from %s: %s", layer.filename, err) - continue - } - dBuf = make([]byte, 16*1024) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) - continue - } - - // Read and discard remaining bytes - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to copy from %s: %s", layer.filename, err) - continue - } - var gotSum string - if len(layer.jsonfile) > 0 { - jfh, err := os.Open(layer.jsonfile) - if err != nil { - t.Errorf("failed to open %s: %s", layer.jsonfile, err) - continue - } - buf, err := ioutil.ReadAll(jfh) - if err != nil { - t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) - continue - } - gotSum = ts.Sum(buf) - } else { - gotSum = ts.Sum(nil) - } - - if layer.tarsum != gotSum { - t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) - } - var expectedHashName string - if layer.hash != nil { - expectedHashName = layer.hash.Name() - } else { - expectedHashName = DefaultTHash.Name() - } - if expectedHashName != ts.Hash().Name() { - t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) - } - } -} - -func TestIteration(t *testing.T) { - headerTests := []struct { - expectedSum string // TODO(vbatts) it would be nice to get individual sums of each - version Version - hdr *tar.Header - data []byte - }{ - { - "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", - Version0, - &tar.Header{ - Name: "file.txt",
- Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", - VersionDev, - &tar.Header{ - Name: "file.txt", - Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", - VersionDev, - &tar.Header{ - Name: "another.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.key1": "value1", - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.KEY1": "value1", // adding different case to ensure different sum - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", - Version0, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.NOT": "CALCULATED", - }, - }, - []byte("test"), - }, - } - for _, htest := range headerTests { - s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) - if err != nil { - t.Fatal(err) - } - - if s != htest.expectedSum { - t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) - } - } - -} - -func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { - buf := bytes.NewBuffer(nil) - // first build our test tar - tw := tar.NewWriter(buf) - if err := tw.WriteHeader(h); err != nil { - return "", err - } - if _, err := tw.Write(data); err != nil { - return "", err - } - tw.Close() - - ts, err := NewTarSum(buf, true, v) - if err != nil { - return "", err - } - tr := tar.NewReader(ts) - for { - hdr, err := tr.Next() - if hdr == nil || err == io.EOF { - // Signals the end of the archive. 
- break - } - if err != nil { - return "", err - } - if _, err = io.Copy(ioutil.Discard, tr); err != nil { - return "", err - } - } - return ts.Sum(nil), nil -} - -func Benchmark9kTar(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, true, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -func Benchmark9kTarGzip(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) -} - -func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { - var fh *os.File - tarReader := sizedTar(opts) - if br, ok := tarReader.(*os.File); ok { - fh = br - } - defer os.Remove(fh.Name()) - defer fh.Close() - - b.SetBytes(opts.size * opts.num) - b.ResetTimer() - for i := 0; i < b.N; i++ { - ts, err := NewTarSum(fh, !isGzip, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - fh.Seek(0, 0) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json deleted file mode 100644 index 48e2af34..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json +++ /dev/null @@ -1 +0,0 @@ 
-{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar deleted file mode 100644 index dfd5c204aea77673f13fdd2f81cb4af1c155c00c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? 
zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar deleted file mode 100644 index 7b5c04a9644808851fcccab5c3c240bf342abd93..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go deleted file mode 100644 index 3cdc6dda..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go +++ /dev/null @@ -1,150 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "errors" - "sort" - "strconv" - "strings" -) - -// versioning of the TarSum algorithm -// based on the prefix of the hash used -// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" -type Version int - -// Prefix of "tarsum" -const ( - Version0 Version = iota - Version1 - // NOTE: this variable will be either the latest or an unsettled next-version of the TarSum calculation - VersionDev -) - -// VersionLabelForChecksum returns the label for the given tarsum -// checksum, i.e., everything before the first `+` character in -// the string or an empty string if no label separator is found. -func VersionLabelForChecksum(checksum string) string { - // Checksums are in the form: {versionLabel}+{hashID}:{hex} - sepIndex := strings.Index(checksum, "+") - if sepIndex < 0 { - return "" - } - return checksum[:sepIndex] -} - -// Get a list of all known tarsum Version -func GetVersions() []Version { - v := []Version{} - for k := range tarSumVersions { - v = append(v, k) - } - return v -} - -var ( - tarSumVersions = map[Version]string{ - Version0: "tarsum", - Version1: "tarsum.v1", - VersionDev: "tarsum.dev", - } - tarSumVersionsByName = map[string]Version{ - "tarsum": Version0, - "tarsum.v1": Version1, - "tarsum.dev": VersionDev, - } -) - -func (tsv Version) String() string { - return tarSumVersions[tsv] -} - -// GetVersionFromTarsum returns the Version from the provided string -func GetVersionFromTarsum(tarsum string) (Version, error) { - tsv := tarsum - if strings.Contains(tarsum, "+") { - tsv = strings.SplitN(tarsum, "+", 2)[0] - } - for v, s := range tarSumVersions { - if s == tsv { - return v, nil - } - } - return -1, ErrNotVersion -} - -// Errors that may be returned by functions in this package -var ( - ErrNotVersion = errors.New("string does not include a TarSum Version") - ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") -) - -// tarHeaderSelector is the interface which different versions -// of tarsum should use for selecting and ordering tar headers -// for each item in the archive. 
-type tarHeaderSelector interface { - selectHeaders(h *tar.Header) (orderedHeaders [][2]string) -} - -type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) - -func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { - return f(h) -} - -func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - return [][2]string{ - {"name", h.Name}, - {"mode", strconv.Itoa(int(h.Mode))}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.Itoa(int(h.Size))}, - {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.Itoa(int(h.Devmajor))}, - {"devminor", strconv.Itoa(int(h.Devminor))}, - } -} - -func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - // Get extended attributes. - xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) - } - sort.Strings(xAttrKeys) - - // Make the slice with enough capacity to hold the 11 basic headers - // we want from the v0 selector plus however many xattrs we have. - orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) - - // Copy all headers from v0 excluding the 'mtime' header (the 5th element). - v0headers := v0TarHeaderSelect(h) - orderedHeaders = append(orderedHeaders, v0headers[0:5]...) - orderedHeaders = append(orderedHeaders, v0headers[6:]...) - - // Finally, append the sorted xattrs. - for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) - } - - return -} - -var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ - Version0: v0TarHeaderSelect, - Version1: v1TarHeaderSelect, - VersionDev: v1TarHeaderSelect, -} - -func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { - headerSelector, ok := registeredHeaderSelectors[v] - if !ok { - return nil, ErrVersionNotImplemented - } - - return headerSelector, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go deleted file mode 100644 index 88e0a578..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package tarsum - -import ( - "testing" -) - -func TestVersionLabelForChecksum(t *testing.T) { - version := VersionLabelForChecksum("tarsum+sha256:deadbeef") - if version != "tarsum" { - t.Fatalf("Version should have been 'tarsum', was %v", version) - } - version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") - if version != "tarsum.v1" { - t.Fatalf("Version should have been 'tarsum.v1', was %v", version) - } - version = VersionLabelForChecksum("something+somethingelse") - if version != "something" { - t.Fatalf("Version should have been 'something', was %v", version) - } - version = VersionLabelForChecksum("invalidChecksum") - if version != "" { - t.Fatalf("Version should have been empty, was %v", version) - } -} - -func TestVersion(t *testing.T) { - expected := "tarsum" - var v Version - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - expected = "tarsum.v1" - v = 1 - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - expected = "tarsum.dev" - v = 2 - if v.String() != 
expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } -} - -func TestGetVersion(t *testing.T) { - testSet := []struct { - Str string - Expected Version - }{ - {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, - {"tarsum+sha256", Version0}, - {"tarsum", Version0}, - {"tarsum.dev", VersionDev}, - {"tarsum.dev+sha256:deadbeef", VersionDev}, - } - - for _, ts := range testSet { - v, err := GetVersionFromTarsum(ts.Str) - if err != nil { - t.Fatalf("%q : %s", err, ts.Str) - } - if v != ts.Expected { - t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) - } - } - - // test one that does not exist, to ensure it errors - str := "weak+md5:abcdeabcde" - _, err := GetVersionFromTarsum(str) - if err != ErrNotVersion { - t.Fatalf("%q : %s", err, str) - } -} - -func TestGetVersions(t *testing.T) { - expected := []Version{ - Version0, - Version1, - VersionDev, - } - versions := GetVersions() - if len(versions) != len(expected) { - t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) - } - if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { - t.Fatalf("Expected [%v], got [%v]", expected, versions) - } -} - -func containsVersion(versions []Version, version Version) bool { - for _, v := range versions { - if v == version { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go deleted file mode 100644 index 9727ecde..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go +++ /dev/null @@ -1,22 +0,0 @@ -package tarsum - -import ( - "io" -) - -type writeCloseFlusher interface { - io.WriteCloser - Flush() error -} - -type nopCloseFlusher struct { - io.Writer -} - -func (n *nopCloseFlusher) Close() error { - return nil -} - -func (n *nopCloseFlusher) Flush() error { - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! 
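For orientation amid these deletions: the tarsum hunks above remove the checksum-versioning API, which callers drove roughly as follows. This is a minimal sketch based only on the deleted sources; the upstream (non-vendored) import path is an assumption for illustration.

package main

import (
	"fmt"

	// Upstream import path; the patch above vendors this package under
	// docker/distribution's Godeps workspace instead.
	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	checksum := "tarsum.v1+sha256:deadbeef"

	// Everything before the first '+' is the version label.
	label := tarsum.VersionLabelForChecksum(checksum)

	// Resolve the label to a Version constant; unknown labels
	// yield ErrNotVersion.
	v, err := tarsum.GetVersionFromTarsum(checksum)
	if err != nil {
		fmt.Println("not a tarsum:", err)
		return
	}
	fmt.Printf("label=%q version=%q\n", label, v.String())
}

Per the deleted versioning.go, the label doubles as the key in tarSumVersions, which is why GetVersionFromTarsum only splits on the first '+'.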
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/README.md deleted file mode 100644 index 8e7db381..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# libtrust - -Libtrust is a library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control are managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. -Docs released under Creative Commons. - diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go deleted file mode 100644 index 3dcca33c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "time" -) - -type certTemplateInfo struct { - commonName string - domains []string - ipAddresses []net.IP - isCA bool - clientAuth bool - serverAuth bool -} - -func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { - // Generate a certificate template which is valid from the past week to - // 10 years from now. The usage of the certificate depends on the - // specified fields in the given certTemplateInfo object.
- var ( - keyUsage x509.KeyUsage - extKeyUsage []x509.ExtKeyUsage - ) - - if info.isCA { - keyUsage = x509.KeyUsageCertSign - } - - if info.clientAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if info.serverAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - return &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: info.commonName, - }, - NotBefore: time.Now().Add(-time.Hour * 24 * 7), - NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), - DNSNames: info.domains, - IPAddresses: info.ipAddresses, - IsCA: info.isCA, - KeyUsage: keyUsage, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: info.isCA, - } -} - -func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { - pubCertTemplate := generateCertTemplate(subInfo) - privCertTemplate := generateCertTemplate(issInfo) - - certDER, err := x509.CreateCertificate( - rand.Reader, pubCertTemplate, privCertTemplate, - pub.CryptoPublicKey(), priv.CryptoPrivateKey(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate: %s", err) - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %s", err) - } - - return -} - -// GenerateSelfSignedServerCert creates a self-signed certificate for the -// given key which is to be used for TLS servers with the given domains and -// IP addresses. -func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - domains: domains, - ipAddresses: ipAddresses, - serverAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateSelfSignedClientCert creates a self-signed certificate for the -// given key which is to be used for TLS clients. -func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - clientAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateCACert creates a certificate which can be used as a trusted -// certificate authority. -func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { - subjectInfo := &certTemplateInfo{ - commonName: trustedKey.KeyID(), - isCA: true, - } - issuerInfo := &certTemplateInfo{ - commonName: signer.KeyID(), - } - - return generateCert(trustedKey, signer, subjectInfo, issuerInfo) -} - -// GenerateCACertPool creates a certificate authority pool to be used for a -// TLS configuration. Any self-signed certificates issued by the specified -// trusted keys will be verified during a TLS handshake -func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, trustedKey := range trustedKeys { - cert, err := GenerateCACert(signer, trustedKey) - if err != nil { - return nil, fmt.Errorf("failed to generate CA certificate: %s", err) - } - - certPool.AddCert(cert) - } - - return certPool, nil -} - -// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". 
-func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - certificates := []*x509.Certificate{} - var block *pem.Block - block, b = pem.Decode(b) - for ; block != nil; block, b = pem.Decode(b) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } else { - return nil, fmt.Errorf("invalid pem block type: %s", block.Type) - } - } - - return certificates, nil -} - -// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificatePool(filename string) (*x509.CertPool, error) { - certs, err := LoadCertificateBundle(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go deleted file mode 100644 index c111f353..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "io/ioutil" - "net" - "os" - "path" - "testing" -) - -func TestGenerateCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedClientCert(key) - if err != nil { - t.Fatal(err) - } -} - -func TestGenerateCACertPool(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - cert1, err := GenerateCACert(caKey1, key) - if err != nil { - t.Fatal(err) - } - cert2, err := GenerateCACert(caKey2, key) - if err != nil { - t.Fatal(err) - } - - d, err := ioutil.TempDir("/tmp", "cert-test") - if err != nil { - t.Fatal(err) - } - caFile := path.Join(d, "ca.pem") - f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) - } - - err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw}) - if err != nil { - t.Fatal(err) - } - err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) - if err != nil { - t.Fatal(err) - } - f.Close() - - certs, err := LoadCertificateBundle(caFile) - if err != nil { - t.Fatal(err) - } - if len(certs) != 2 { - t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) - } - - pool, err := LoadCertificatePool(caFile) - if err != nil { - t.Fatal(err) - } - - if len(pool.Subjects()) != 2 { - 
t.Fatalf("Invalid certificate pool") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/doc.go deleted file mode 100644 index ec5d2159..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package libtrust provides an interface for managing authentication and -authorization using public key cryptography. Authentication is handled -using the identity attached to the public key and verified through TLS -x509 certificates, a key challenge, or signature. Authorization and -access control is managed through a trust graph distributed between -both remote trust servers and locally cached and managed data. -*/ -package libtrust diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go deleted file mode 100644 index 00bbe4b3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go +++ /dev/null @@ -1,428 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * EC DSA PUBLIC KEY - */ - -// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital -// signature algorithms. -type ecPublicKey struct { - *ecdsa.PublicKey - curveName string - signatureAlgorithm *signatureAlgorithm - extended map[string]interface{} -} - -func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { - curve := cryptoPublicKey.Curve - - switch { - case curve == elliptic.P256(): - return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil - case curve == elliptic.P384(): - return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil - case curve == elliptic.P521(): - return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil - default: - return nil, errors.New("unsupported elliptic curve") - } -} - -// KeyType returns the key type for elliptic curve keys, i.e., "EC". -func (k *ecPublicKey) KeyType() string { - return "EC" -} - -// CurveName returns the elliptic curve identifier. -// Possible values are "P-256", "P-384", and "P-521". -func (k *ecPublicKey) CurveName() string { - return k.curveName -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *ecPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *ecPublicKey) String() string { - return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this -// PublicKey. The alg parameter should identify the digital signature -// algorithm which was used to produce the signature and should be supported -// by this public key. Returns a nil error if the signature is valid. -func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // For EC keys there is only one supported signature algorithm depending - // on the curve parameters. 
- if k.signatureAlgorithm.HeaderParam() != alg { - return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) - } - - // signature is the concatenation of (r, s), base64Url encoded. - sigLength := len(signature) - expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) - if sigLength != expectedOctetLength { - return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) - } - - rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] - r := new(big.Int).SetBytes(rBytes) - s := new(big.Int).SetBytes(sBytes) - - hasher := k.signatureAlgorithm.HashID().New() - _, err := io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - if !ecdsa.Verify(k.PublicKey, hash, r, s) { - return errors.New("invalid signature") - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *ecPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["crv"] = k.CurveName() - - xBytes := k.X.Bytes() - yBytes := k.Y.Bytes() - octetLength := (k.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output so that x, y are each - // *octetLength* bytes long. - xBuf := make([]byte, octetLength-len(xBytes), octetLength) - yBuf := make([]byte, octetLength-len(yBytes), octetLength) - xBuf = append(xBuf, xBytes...) - yBuf = append(yBuf, yBytes...) - - jwk["x"] = joseBase64UrlEncode(xBuf) - jwk["y"] = joseBase64UrlEncode(yBuf) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *ecPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { - // JWK key type (kty) has already been determined to be "EC". - // Need to extract 'crv', 'x', 'y', and 'kid' and check for - // consistency. - - // Get the curve identifier value. 
- crv, err := stringFromMap(jwk, "crv") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) - } - - var ( - curve elliptic.Curve - sigAlg *signatureAlgorithm - ) - - switch { - case crv == "P-256": - curve = elliptic.P256() - sigAlg = es256 - case crv == "P-384": - curve = elliptic.P384() - sigAlg = es384 - case crv == "P-521": - curve = elliptic.P521() - sigAlg = es512 - default: - return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) - } - - // Get the X and Y coordinates for the public key point. - xB64Url, err := stringFromMap(jwk, "x") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - x, err := parseECCoordinate(xB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - - yB64Url, err := stringFromMap(jwk, "y") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - y, err := parseECCoordinate(yB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - - key := &ecPublicKey{ - PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, - curveName: crv, signatureAlgorithm: sigAlg, - } - - // Key ID is optional too, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) - } - } - - key.extended = jwk - - return key, nil -} - -/* - * EC DSA PRIVATE KEY - */ - -// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature -// algorithms. -type ecPrivateKey struct { - ecPublicKey - *ecdsa.PrivateKey -} - -func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { - publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) - if err != nil { - return nil, err - } - - return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *ecPrivateKey) PublicKey() PublicKey { - return &k.ecPublicKey -} - -func (k *ecPrivateKey) String() string { - return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the elliptic curve private key. If the specified hashing algorithm is -// supported by this key, that hash function is used to generate the signature, -// otherwise the default hashing algorithm for this key is used. Returns -// the signature and the name of the JWK signature algorithm used, e.g., -// "ES256", "ES384", "ES512". -func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - // The given hashID is only a suggestion, and since EC keys only support - // one signature/hash algorithm given the curve name, we disregard it for - // the elliptic curve JWK signature implementation.
- hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - rBytes, sBytes := r.Bytes(), s.Bytes() - octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output - rBuf := make([]byte, octetLength-len(rBytes), octetLength) - sBuf := make([]byte, octetLength-len(sBytes), octetLength) - - rBuf = append(rBuf, rBytes...) - sBuf = append(sBuf, sBytes...) - - signature = append(rBuf, sBuf...) - alg = k.signatureAlgorithm.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PrivateKey for use with other standard library operations. The type -// is either *rsa.PrivateKey or *ecdsa.PrivateKey -func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *ecPrivateKey) toMap() map[string]interface{} { - jwk := k.ecPublicKey.toMap() - - dBytes := k.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := k.ecPublicKey.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - // Create a buffer with the necessary zero-padding. - dBuf := make([]byte, octetLength-len(dBytes), octetLength) - dBuf = append(dBuf, dBytes...) - - jwk["d"] = joseBase64UrlEncode(dBuf) - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) - } - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) -} - -func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { - dB64Url, err := stringFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key: %s", err) - } - - // JWK key type (kty) has already been determined to be "EC". - // Need to extract the public key information, then extract the private - // key value 'd'. - publicKey, err := ecPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - d, err := parseECPrivateParam(dB64Url, publicKey.Curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) - } - - key := &ecPrivateKey{ - ecPublicKey: *publicKey, - PrivateKey: &ecdsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: d, - }, - } - - return key, nil -} - -/* - * Key Generation Functions.
- */ - -func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { - k = new(ecPrivateKey) - k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, err - } - - k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. -func GenerateECP256PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-256 key: %s", err) - } - - k.curveName = "P-256" - k.signatureAlgorithm = es256 - - return k, nil -} - -// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. -func GenerateECP384PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P384()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-384 key: %s", err) - } - - k.curveName = "P-384" - k.signatureAlgorithm = es384 - - return k, nil -} - -// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521. -func GenerateECP521PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P521()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-521 key: %s", err) - } - - k.curveName = "P-521" - k.signatureAlgorithm = es512 - - return k, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go deleted file mode 100644 index 26ac3814..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "testing" -) - -func generateECTestKeys(t *testing.T) []PrivateKey { - p256Key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - p384Key, err := GenerateECP384PrivateKey() - if err != nil { - t.Fatal(err) - } - - p521Key, err := GenerateECP521PrivateKey() - if err != nil { - t.Fatal(err) - } - - return []PrivateKey{p256Key, p384Key, p521Key} -} - -func TestECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - if ecKey.KeyType() != "EC" { - t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType()) - } - } -} - -func TestECSignVerify(t *testing.T) { - ecKeys := generateECTestKeys(t) - - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := ecKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = ecKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - data := bytes.NewReader([]byte("This is a test.
I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - } -} - -func TestFromCryptoECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - cryptoPrivateKey := ecKey.CryptoPrivateKey() - cryptoPublicKey := ecKey.CryptoPublicKey() - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - t.Fatal(err) - } - - if pubKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - - privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) - if err != nil { - t.Fatal(err) - } - - if privKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - } -} - -func TestExtendedFields(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - key.AddExtendedField("test", "foobar") - val := key.GetExtendedField("test") - - gotVal, ok := val.(string) - if !ok { - t.Fatalf("value is not a string") - } else if gotVal != val { - t.Fatalf("value %q is not equal to %q", gotVal, val) - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter.go deleted file mode 100644 index 5b2b4fca..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter.go +++ /dev/null @@ -1,50 +0,0 @@ -package libtrust - -import ( - "path/filepath" -) - -// FilterByHosts filters the list of PublicKeys to only those which contain a -// 'hosts' pattern which matches the given host. If *includeEmpty* is true, -// then keys which do not specify any hosts are also returned. 
-func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { - filtered := make([]PublicKey, 0, len(keys)) - - for _, pubKey := range keys { - var hosts []string - switch v := pubKey.GetExtendedField("hosts").(type) { - case []string: - hosts = v - case []interface{}: - for _, value := range v { - h, ok := value.(string) - if !ok { - continue - } - hosts = append(hosts, h) - } - } - - if len(hosts) == 0 { - if includeEmpty { - filtered = append(filtered, pubKey) - } - continue - } - - // Check if any hosts match pattern; stop at the first match so a - // key matching several patterns is only added once. - for _, hostPattern := range hosts { - match, err := filepath.Match(hostPattern, host) - if err != nil { - return nil, err - } - - if match { - filtered = append(filtered, pubKey) - break - } - } - } - - return filtered, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go deleted file mode 100644 index 997e554c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package libtrust - -import ( - "testing" -) - -func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { - if len(sliceA) != len(sliceB) { - t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) - } - - for i, itemA := range sliceA { - itemB := sliceB[i] - if itemA != itemB { - t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) - } - } -} - -func TestFilter(t *testing.T) { - keys := make([]PublicKey, 0, 8) - - // Create 8 keys and add host entries. - for i := 0; i < cap(keys); i++ { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - // we use both []interface{} and []string here because jwt uses - // []interface{} format, while PEM uses []string - switch { - case i == 0: - // Don't add entries for this key, key 0. - break - case i%2 == 0: - // Should catch keys 2, 4, and 6. - key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) - case i == 7: - // Should catch only the last key, and make it match any hostname. - key.AddExtendedField("hosts", []string{"*"}) - default: - // Should catch keys 1, 3, 5. - key.AddExtendedField("hosts", []string{"*.example.com"}) - } - - keys = append(keys, key) - } - - // Should match 2 keys, the empty one, and the one that matches all hosts. - matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) - if err != nil { - t.Fatal(err) - } - expectedMatch := []PublicKey{keys[0], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match 1 key, the one that matches any host. - matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match keys that end in "example.com", and the key that matches anything. - matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match all of the keys except the empty key.
- matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = keys[1:] - compareKeySlices(t, expectedMatch, matchedKeys) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/hash.go deleted file mode 100644 index a2df787d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/hash.go +++ /dev/null @@ -1,56 +0,0 @@ -package libtrust - -import ( - "crypto" - _ "crypto/sha256" // Register SHA224 and SHA256 - _ "crypto/sha512" // Register SHA384 and SHA512 - "fmt" -) - -type signatureAlgorithm struct { - algHeaderParam string - hashID crypto.Hash -} - -func (h *signatureAlgorithm) HeaderParam() string { - return h.algHeaderParam -} - -func (h *signatureAlgorithm) HashID() crypto.Hash { - return h.hashID -} - -var ( - rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} - rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} - rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} - es256 = &signatureAlgorithm{"ES256", crypto.SHA256} - es384 = &signatureAlgorithm{"ES384", crypto.SHA384} - es512 = &signatureAlgorithm{"ES512", crypto.SHA512} -) - -func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { - switch { - case alg == "RS256": - return rs256, nil - case alg == "RS384": - return rs384, nil - case alg == "RS512": - return rs512, nil - default: - return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) - } -} - -func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { - switch { - case hashID == crypto.SHA512: - return rs512 - case hashID == crypto.SHA384: - return rs384 - case hashID == crypto.SHA256: - fallthrough - default: - return rs256 - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go deleted file mode 100644 index cb2ca9a7..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go +++ /dev/null @@ -1,657 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sort" - "time" - "unicode" -) - -var ( - // ErrInvalidSignContent is used when the content to be signed is invalid. - ErrInvalidSignContent = errors.New("invalid sign content") - - // ErrInvalidJSONContent is used when invalid json is encountered. - ErrInvalidJSONContent = errors.New("invalid json content") - - // ErrMissingSignatureKey is used when the specified signature key - // does not exist in the JSON content.
- ErrMissingSignatureKey = errors.New("missing signature key") -) - -type jsHeader struct { - JWK PublicKey `json:"jwk,omitempty"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c,omitempty"` -} - -type jsSignature struct { - Header jsHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected,omitempty"` -} - -type jsSignaturesSorted []jsSignature - -func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } -func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } - -func (jsbkid jsSignaturesSorted) Less(i, j int) bool { - ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() - si, sj := jsbkid[i].Signature, jsbkid[j].Signature - - if ki == kj { - return si < sj - } - - return ki < kj -} - -type signKey struct { - PrivateKey - Chain []*x509.Certificate -} - -// JSONSignature represents a signature of a json object. -type JSONSignature struct { - payload string - signatures []jsSignature - indent string - formatLength int - formatTail []byte -} - -func newJSONSignature() *JSONSignature { - return &JSONSignature{ - signatures: make([]jsSignature, 0, 1), - } -} - -// Payload returns the encoded payload of the signature. This -// payload should not be signed directly -func (js *JSONSignature) Payload() ([]byte, error) { - return joseBase64UrlDecode(js.payload) -} - -func (js *JSONSignature) protectedHeader() (string, error) { - protected := map[string]interface{}{ - "formatLength": js.formatLength, - "formatTail": joseBase64UrlEncode(js.formatTail), - "time": time.Now().UTC().Format(time.RFC3339), - } - protectedBytes, err := json.Marshal(protected) - if err != nil { - return "", err - } - - return joseBase64UrlEncode(protectedBytes), nil -} - -func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { - buf := make([]byte, len(js.payload)+len(protectedHeader)+1) - copy(buf, protectedHeader) - buf[len(protectedHeader)] = '.' - copy(buf[len(protectedHeader)+1:], js.payload) - return buf, nil -} - -// Sign adds a signature using the given private key. -func (js *JSONSignature) Sign(key PrivateKey) error { - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - js.signatures = append(js.signatures, jsSignature{ - Header: jsHeader{ - JWK: key.PublicKey(), - Algorithm: algorithm, - }, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// SignWithChain adds a signature using the given private key -// and setting the x509 chain. The public key of the first element -// in the chain must be the public key corresponding with the sign key. 
-func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { - // Ensure key.Chain[0] is public key for key - //key.Chain.PublicKey - //key.PublicKey().CryptoPublicKey() - - // Verify chain - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - header := jsHeader{ - Chain: make([]string, len(chain)), - Algorithm: algorithm, - } - - for i, cert := range chain { - header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - - js.signatures = append(js.signatures, jsSignature{ - Header: header, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// Verify verifies all the signatures and returns the list of -// public keys used to sign. Any x509 chains are not checked. -func (js *JSONSignature) Verify() ([]PublicKey, error) { - keys := make([]PublicKey, len(js.signatures)) - for i, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - } else if signature.Header.JWK != nil { - publicKey = signature.Header.JWK - } else { - return nil, errors.New("missing public key") - } - - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - - keys[i] = publicKey - } - return keys, nil -} - -// VerifyChains verifies all the signatures and the chains associated -// with each signature and returns the list of verified chains. -// Signatures without an x509 chain are not checked. -func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { - chains := make([][]*x509.Certificate, 0, len(js.signatures)) - for _, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - intermediates := x509.NewCertPool() - if len(signature.Header.Chain) > 1 { - intermediateChain := signature.Header.Chain[1:] - for i := range intermediateChain { - certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) - if err != nil { - return nil, err - } - intermediate, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - intermediates.AddCert(intermediate) - } - } - - verifyOptions := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: ca, - } - - verifiedChains, err := cert.Verify(verifyOptions) - if err != nil { - return nil, err - } - chains = append(chains, verifiedChains...) 
- - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - } - - } - return chains, nil -} - -// JWS returns JSON serialized JWS according to -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 -func (js *JSONSignature) JWS() ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("missing signature") - } - - sort.Sort(jsSignaturesSorted(js.signatures)) - - jsonMap := map[string]interface{}{ - "payload": js.payload, - "signatures": js.signatures, - } - - return json.MarshalIndent(jsonMap, "", " ") -} - -func notSpace(r rune) bool { - return !unicode.IsSpace(r) -} - -func detectJSONIndent(jsonContent []byte) (indent string) { - if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { - quoteIndex := bytes.IndexRune(jsonContent[1:], '"') - if quoteIndex > 0 { - indent = string(jsonContent[2 : quoteIndex+1]) - } - } - return -} - -type jsParsedHeader struct { - JWK json.RawMessage `json:"jwk"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c"` -} - -type jsParsedSignature struct { - Header jsParsedHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected"` -} - -// ParseJWS parses a JWS serialized JSON object into a Json Signature. -func ParseJWS(content []byte) (*JSONSignature, error) { - type jsParsed struct { - Payload string `json:"payload"` - Signatures []jsParsedSignature `json:"signatures"` - } - parsed := &jsParsed{} - err := json.Unmarshal(content, parsed) - if err != nil { - return nil, err - } - if len(parsed.Signatures) == 0 { - return nil, errors.New("missing signatures") - } - payload, err := joseBase64UrlDecode(parsed.Payload) - if err != nil { - return nil, err - } - - js, err := NewJSONSignature(payload) - if err != nil { - return nil, err - } - js.signatures = make([]jsSignature, len(parsed.Signatures)) - for i, signature := range parsed.Signatures { - header := jsHeader{ - Algorithm: signature.Header.Algorithm, - } - if signature.Header.Chain != nil { - header.Chain = signature.Header.Chain - } - if signature.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) - if err != nil { - return nil, err - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signature.Signature, - Protected: signature.Protected, - } - } - - return js, nil -} - -// NewJSONSignature returns a new unsigned JWS from a json byte array. -// JSONSignature will need to be signed before serializing or storing. -// Optionally, one or more signatures can be provided as byte buffers, -// containing serialized JWS signatures, to assemble a fully signed JWS -// package. It is the callers responsibility to ensure uniqueness of the -// provided signatures. 
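The detached-signature path described above, concretely (a sketch building on the previous one; the implementation follows):

// reassemble stores a JWS's signatures as opaque blobs, then rebuilds a
// fully signed JWS from the raw payload plus those blobs.
func reassemble(js *libtrust.JSONSignature) error {
	blobs, err := js.Signatures() // marshaled signatures, sorted by key ID
	if err != nil {
		return err
	}
	payload, err := js.Payload() // the original JSON content
	if err != nil {
		return err
	}
	rebuilt, err := libtrust.NewJSONSignature(payload, blobs...)
	if err != nil {
		return err
	}
	_, err = rebuilt.Verify()
	return err
}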
-func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { - var dataMap map[string]interface{} - err := json.Unmarshal(content, &dataMap) - if err != nil { - return nil, err - } - - js := newJSONSignature() - js.indent = detectJSONIndent(content) - - js.payload = joseBase64UrlEncode(content) - - // Find trailing } and whitespace, put in protected header - closeIndex := bytes.LastIndexFunc(content, notSpace) - if content[closeIndex] != '}' { - return nil, ErrInvalidJSONContent - } - lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) - if content[lastRuneIndex] == ',' { - return nil, ErrInvalidJSONContent - } - js.formatLength = lastRuneIndex + 1 - js.formatTail = content[js.formatLength:] - - if len(signatures) > 0 { - for _, signature := range signatures { - var parsedJSig jsParsedSignature - - if err := json.Unmarshal(signature, &parsedJSig); err != nil { - return nil, err - } - - // TODO(stevvooe): A lot of the code below is repeated in - // ParseJWS. It will require more refactoring to fix that. - jsig := jsSignature{ - Header: jsHeader{ - Algorithm: parsedJSig.Header.Algorithm, - }, - Signature: parsedJSig.Signature, - Protected: parsedJSig.Protected, - } - - if parsedJSig.Header.Chain != nil { - jsig.Header.Chain = parsedJSig.Header.Chain - } - - if parsedJSig.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) - if err != nil { - return nil, err - } - jsig.Header.JWK = publicKey - } - - js.signatures = append(js.signatures, jsig) - } - } - - return js, nil -} - -// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or -// struct. JWS will need to be signed before serializing or storing. -func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { - switch content.(type) { - case map[string]interface{}: - case struct{}: - default: - return nil, errors.New("invalid data type") - } - - js := newJSONSignature() - js.indent = " " - - payload, err := json.MarshalIndent(content, "", js.indent) - if err != nil { - return nil, err - } - js.payload = joseBase64UrlEncode(payload) - - // Remove '\n}' from formatted section, put in protected header - js.formatLength = len(payload) - 2 - js.formatTail = payload[js.formatLength:] - - return js, nil -} - -func readIntFromMap(key string, m map[string]interface{}) (int, bool) { - value, ok := m[key] - if !ok { - return 0, false - } - switch v := value.(type) { - case int: - return v, true - case float64: - return int(v), true - default: - return 0, false - } -} - -func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { - value, ok := m[key] - if !ok { - return "", false - } - v, ok = value.(string) - return -} - -// ParsePrettySignature parses a formatted signature into a -// JSON signature. If the signatures are missing the format information -// an error is thrown. The formatted signature must be created by -// the same method as format signature. 
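Before the parser itself, here is the full round trip the pretty-signature pair supports, starting from a plain map (a sketch reusing the API above; the "signatures" key name is an arbitrary choice):

func prettyRoundTrip(key libtrust.PrivateKey) ([]byte, error) {
	// Maps and structs are marshaled with a two-space indent; the trailing
	// "\n}" is reserved as the format tail for the signature block.
	js, err := libtrust.NewJSONSignatureFromMap(map[string]interface{}{
		"name": "example/app",
	})
	if err != nil {
		return nil, err
	}
	if err := js.Sign(key); err != nil {
		return nil, err
	}

	// Embed the signatures inside the document itself...
	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}

	// ...and recover and verify them later.
	parsed, err := libtrust.ParsePrettySignature(pretty, "signatures")
	if err != nil {
		return nil, err
	}
	_, err = parsed.Verify()
	return pretty, err
}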
-func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { - var contentMap map[string]json.RawMessage - err := json.Unmarshal(content, &contentMap) - if err != nil { - return nil, fmt.Errorf("error unmarshalling content: %s", err) - } - sigMessage, ok := contentMap[signatureKey] - if !ok { - return nil, ErrMissingSignatureKey - } - - var signatureBlocks []jsParsedSignature - err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) - if err != nil { - return nil, fmt.Errorf("error unmarshalling signatures: %s", err) - } - - js := newJSONSignature() - js.signatures = make([]jsSignature, len(signatureBlocks)) - - for i, signatureBlock := range signatureBlocks { - protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) - if err != nil { - return nil, fmt.Errorf("base64 decode error: %s", err) - } - var protectedHeader map[string]interface{} - err = json.Unmarshal(protectedBytes, &protectedHeader) - if err != nil { - return nil, fmt.Errorf("error unmarshalling protected header: %s", err) - } - - formatLength, ok := readIntFromMap("formatLength", protectedHeader) - if !ok { - return nil, errors.New("missing formatted length") - } - encodedTail, ok := readStringFromMap("formatTail", protectedHeader) - if !ok { - return nil, errors.New("missing formatted tail") - } - formatTail, err := joseBase64UrlDecode(encodedTail) - if err != nil { - return nil, fmt.Errorf("base64 decode error on tail: %s", err) - } - if js.formatLength == 0 { - js.formatLength = formatLength - } else if js.formatLength != formatLength { - return nil, errors.New("conflicting format length") - } - if len(js.formatTail) == 0 { - js.formatTail = formatTail - } else if bytes.Compare(js.formatTail, formatTail) != 0 { - return nil, errors.New("conflicting format tail") - } - - header := jsHeader{ - Algorithm: signatureBlock.Header.Algorithm, - Chain: signatureBlock.Header.Chain, - } - if signatureBlock.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) - if err != nil { - return nil, fmt.Errorf("error unmarshalling public key: %s", err) - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signatureBlock.Signature, - Protected: signatureBlock.Protected, - } - } - if js.formatLength > len(content) { - return nil, errors.New("invalid format length") - } - formatted := make([]byte, js.formatLength+len(js.formatTail)) - copy(formatted, content[:js.formatLength]) - copy(formatted[js.formatLength:], js.formatTail) - js.indent = detectJSONIndent(formatted) - js.payload = joseBase64UrlEncode(formatted) - - return js, nil -} - -// PrettySignature formats a json signature into an easy to read -// single json serialized object. 
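The embedded form assembled below keeps the original document bytes untouched and splices the signature array in ahead of the final closing brace. An abridged sketch of the output of js.PrettySignature("signatures") for an EC P-256 key (the "..." values and exact JWK fields are illustrative):

// {
//   "name": "example/app",
//   "signatures": [
//     {
//       "header": {
//         "jwk": {"crv": "P-256", "kid": "...", "kty": "EC", "x": "...", "y": "..."},
//         "alg": "ES256"
//       },
//       "signature": "...",
//       "protected": "..."
//     }
//   ]
// }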
-func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("no signatures") - } - payload, err := joseBase64UrlDecode(js.payload) - if err != nil { - return nil, err - } - payload = payload[:js.formatLength] - - sort.Sort(jsSignaturesSorted(js.signatures)) - - var marshalled []byte - var marshallErr error - if js.indent != "" { - marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) - } else { - marshalled, marshallErr = json.Marshal(js.signatures) - } - if marshallErr != nil { - return nil, marshallErr - } - - buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) - buf.Write(payload) - buf.WriteByte(',') - if js.indent != "" { - buf.WriteByte('\n') - buf.WriteString(js.indent) - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\": ") - buf.Write(marshalled) - buf.WriteByte('\n') - } else { - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\":") - buf.Write(marshalled) - } - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -// Signatures provides the signatures on this JWS as opaque blobs, sorted by -// keyID. These blobs can be stored and reassembled with payloads. Internally, -// they are simply marshaled json web signatures but implementations should -// not rely on this. -func (js *JSONSignature) Signatures() ([][]byte, error) { - sort.Sort(jsSignaturesSorted(js.signatures)) - - var sb [][]byte - for _, jsig := range js.signatures { - p, err := json.Marshal(jsig) - if err != nil { - return nil, err - } - - sb = append(sb, p) - } - - return sb, nil -} - -// Merge combines the signatures from one or more other signatures into the -// method receiver. If the payloads differ for any argument, an error will be -// returned and the receiver will not be modified. -func (js *JSONSignature) Merge(others ...*JSONSignature) error { - merged := js.signatures - for _, other := range others { - if js.payload != other.payload { - return fmt.Errorf("payloads differ from merge target") - } - merged = append(merged, other.signatures...) 
- } - - js.signatures = merged - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go deleted file mode 100644 index b4f26979..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto/rand" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "testing" - - "github.com/docker/libtrust/testutil" -) - -func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) { - testMap := map[string]interface{}{ - "name": "dmcgowan/mycontainer", - "config": map[string]interface{}{ - "ports": []int{9101, 9102}, - "run": "/bin/echo \"Hello\"", - }, - "layers": []string{ - "2893c080-27f5-11e4-8c21-0800200c9a66", - "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55", - "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4", - "0b6da891-7f7f-4abf-9c97-7887549e696c", - "1d960389-ae4f-4011-85fd-18d0f96a67ad", - }, - } - formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{` - formattedSection = fmt.Sprintf(formattedSection, sigKey) - if indent != "" { - buf := bytes.NewBuffer(nil) - json.Indent(buf, []byte(formattedSection), "", indent) - return testMap, buf.Bytes() - } - return testMap, []byte(formattedSection) - -} - -func TestSignJSON(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing content: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - -} - -func TestSignMap(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func TestFormattedJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := 
NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing content: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - - var unmarshalled map[string]interface{} - err = json.Unmarshal(b, &unmarshalled) - if err != nil { - t.Fatalf("Could not unmarshall after parse: %s", err) - } - -} - -func TestFormattedFlatJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", "") - unindented, err := json.Marshal(testMap) - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(unindented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) { - parent := ca - parentKey := key - chain := make([]*x509.Certificate, 6) - for i := 5; i > 0; i-- { - intermediatekey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating intermdiate certificate: %s", err) - } - parent = chain[i] - parentKey = intermediatekey - } - trustKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generate trust cert: %s", err) - } - - return trustKey, chain -} - -func TestChainVerify(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) 
- - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSONSignature from map: %s", err) - } - - err = js.SignWithChain(trustKey, chain) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err != nil { - t.Fatalf("Error verifying content: %s", err) - } - if len(chains) != 1 { - t.Fatalf("Unexpected chains length: %d", len(chains)) - } - if len(chains[0]) != 7 { - t.Fatalf("Unexpected chain length: %d", len(chains[0])) - } -} - -func TestInvalidChain(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) - - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSONSignature from map: %s", err) - } - - err = js.SignWithChain(trustKey, chain[:5]) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err == nil { - t.Fatalf("Expected error verifying with bad chain") - } - if len(chains) != 0 { - t.Fatalf("Unexpected chains returned from invalid verify") - } -} - -func TestMergeSignatures(t *testing.T) { - pk1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 1: %v", err) - } - - pk2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 2: %v", err) - } - - payload := make([]byte, 1<<10) - if _, err = io.ReadFull(rand.Reader, payload); err != nil { - t.Fatalf("error generating payload: %v", err) - } - - payload, _ = json.Marshal(map[string]interface{}{"data": payload}) - - sig1, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 1: %v", err) - } - - if err := sig1.Sign(pk1); err != nil { - t.Fatalf("unexpected error signing with pk1: %v", err) - } - - sig2, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 2: %v", err) - } - - if err := sig2.Sign(pk2); err != nil { - t.Fatalf("unexpected error signing with pk2: %v", err) - } - - // Now, we actually merge into sig1 - if err := sig1.Merge(sig2); err != nil { - t.Fatalf("unexpected error merging: %v", err) - } - - // Verify the new signature package - pubkeys, err := sig1.Verify() - if err != nil { - t.Fatalf("unexpected error during verify: %v", err) - } - - // Make sure the pubkeys match the two private keys from before - privkeys := map[string]PrivateKey{ - pk1.KeyID(): pk1, - pk2.KeyID(): pk2, - } - - found := map[string]struct{}{} - - for _, pubkey := range pubkeys { - if _, ok := privkeys[pubkey.KeyID()]; !ok { - t.Fatalf("unexpected public key found during verification: %v", pubkey) - } - - found[pubkey.KeyID()] = struct{}{} - } - - // Make sure we've found all the private keys from verification - for keyid, _ := range privkeys { - if _, ok := found[keyid]; !ok { - t.Fatalf("public key %v not found during verification", keyid) - } - } - - // Create another signature, with a different payload, and ensure we get an error. 
- sig3, err := NewJSONSignature([]byte("{}")) - if err != nil { - t.Fatalf("unexpected error making signature for sig3: %v", err) - } - - if err := sig1.Merge(sig3); err == nil { - t.Fatalf("error expected during invalid merge with different payload") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key.go deleted file mode 100644 index 73642db2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key.go +++ /dev/null @@ -1,253 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" -) - -// PublicKey is a generic interface for a Public Key. -type PublicKey interface { - // KeyType returns the key type for this key. For elliptic curve keys, - // this value should be "EC". For RSA keys, this value should be "RSA". - KeyType() string - // KeyID returns a distinct identifier which is unique to this Public Key. - // The format generated by this library is a base32 encoding of a 240 bit - // hash of the public key data divided into 12 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - KeyID() string - // Verify verifyies the signature of the data in the io.Reader using this - // Public Key. The alg parameter should identify the digital signature - // algorithm which was used to produce the signature and should be - // supported by this public key. Returns a nil error if the signature - // is valid. - Verify(data io.Reader, alg string, signature []byte) error - // CryptoPublicKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The type - // is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPublicKey() crypto.PublicKey - // These public keys can be serialized to the standard JSON encoding for - // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web - // Algorithms. - MarshalJSON() ([]byte, error) - // These keys can also be serialized to the standard PEM encoding. - PEMBlock() (*pem.Block, error) - // The string representation of a key is its key type and ID. - String() string - AddExtendedField(string, interface{}) - GetExtendedField(string) interface{} -} - -// PrivateKey is a generic interface for a Private Key. -type PrivateKey interface { - // A PrivateKey contains all fields and methods of a PublicKey of the - // same type. The MarshalJSON method also outputs the private key as a - // JSON Web Key, and the PEMBlock method outputs the private key as a - // PEM block. - PublicKey - // PublicKey returns the PublicKey associated with this PrivateKey. - PublicKey() PublicKey - // Sign signs the data read from the io.Reader using a signature algorithm - // supported by the private key. If the specified hashing algorithm is - // supported by this key, that hash function is used to generate the - // signature otherwise the the default hashing algorithm for this key is - // used. Returns the signature and identifier of the algorithm used. - Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) - // CryptoPrivateKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. 
The - // type is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPrivateKey() crypto.PrivateKey -} - -// FromCryptoPublicKey returns a libtrust PublicKey representation of the given -// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { - switch cryptoPublicKey := cryptoPublicKey.(type) { - case *ecdsa.PublicKey: - return fromECPublicKey(cryptoPublicKey) - case *rsa.PublicKey: - return fromRSAPublicKey(cryptoPublicKey), nil - default: - return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) - } -} - -// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given -// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { - switch cryptoPrivateKey := cryptoPrivateKey.(type) { - case *ecdsa.PrivateKey: - return fromECPrivateKey(cryptoPrivateKey) - case *rsa.PrivateKey: - return fromRSAPrivateKey(cryptoPrivateKey), nil - default: - return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) - } -} - -// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust -// PublicKey or an error if there is a problem with the encoding. -func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - return pubKeyFromPEMBlock(pemBlock) -} - -// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of -// PEM blocks appended one after the other and returns a slice of PublicKey -// objects that it finds. -func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { - pubKeys := []PublicKey{} - - for { - var pemBlock *pem.Block - pemBlock, data = pem.Decode(data) - if pemBlock == nil { - break - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - pubKey, err := pubKeyFromPEMBlock(pemBlock) - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust -// PrivateKey or an error if there is a problem with the encoding. 
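These helpers compose with the standard library: any *ecdsa.PrivateKey or *rsa.PrivateKey can be wrapped, serialized via PEMBlock, and read back by the function that follows. A sketch (import path assumed as before):

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/pem"
	"errors"

	"github.com/docker/libtrust"
)

func pemRoundTrip() error {
	// Wrap a standard library key in the libtrust interfaces.
	ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	priv, err := libtrust.FromCryptoPrivateKey(ecKey)
	if err != nil {
		return err
	}

	// PEMBlock emits the "EC PRIVATE KEY" block that UnmarshalPrivateKeyPEM
	// parses back.
	block, err := priv.PEMBlock()
	if err != nil {
		return err
	}
	loaded, err := libtrust.UnmarshalPrivateKeyPEM(pem.EncodeToMemory(block))
	if err != nil {
		return err
	}
	if loaded.KeyID() != priv.KeyID() {
		return errors.New("key IDs do not match after PEM round trip")
	}
	return nil
}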
-func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } - - var key PrivateKey - - switch { - case pemBlock.Type == "RSA PRIVATE KEY": - rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) - } - key = fromRSAPrivateKey(rsaPrivateKey) - case pemBlock.Type == "EC PRIVATE KEY": - ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) - } - key, err = fromECPrivateKey(ecPrivateKey) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) - } - - addPEMHeadersToKey(pemBlock, key.PublicKey()) - - return key, nil -} - -// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic -// Public Key to be used with libtrust. -func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Public Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Public Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC public key. - return ecPublicKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA public key. - return rsaPublicKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Public Key type not supported: %q\n", kty, - ) - } -} - -// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set -// and returns a slice of Public Key objects. -func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { - rawKeys, err := loadJSONKeySetRaw(data) - if err != nil { - return nil, err - } - - pubKeys := make([]PublicKey, 0, len(rawKeys)) - - for _, rawKey := range rawKeys { - pubKey, err := UnmarshalPublicKeyJWK(rawKey) - if err != nil { - return nil, err - } - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic -// Private Key to be used with libtrust. -func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Private Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Private Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC private key. - return ecPrivateKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA private key. 
- return rsaPrivateKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Private Key type not supported: %q\n", kty, - ) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go deleted file mode 100644 index c526de54..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go +++ /dev/null @@ -1,255 +0,0 @@ -package libtrust - -import ( - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - // ErrKeyFileDoesNotExist indicates that the private key file does not exist. - ErrKeyFileDoesNotExist = errors.New("key file does not exist") -) - -func readKeyFileBytes(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - err = ErrKeyFileDoesNotExist - } else { - err = fmt.Errorf("unable to read key file %s: %s", filename, err) - } - - return nil, err - } - - return data, nil -} - -/* - Loading and Saving of Public and Private Keys in either PEM or JWK format. -*/ - -// LoadKeyFile opens the given filename and attempts to read a Private Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadKeyFile(filename string) (PrivateKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PrivateKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPrivateKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key JWK: %s", err) - } - } else { - key, err = UnmarshalPrivateKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key PEM: %s", err) - } - } - - return key, nil -} - -// LoadPublicKeyFile opens the given filename and attempts to read a Public Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadPublicKeyFile(filename string) (PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PublicKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPublicKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key JWK: %s", err) - } - } else { - key, err = UnmarshalPublicKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key PEM: %s", err) - } - } - - return key, nil -} - -// SaveKey saves the given key to a file using the provided filename. -// This process will overwrite any existing file at the provided location. -func SaveKey(filename string, key PrivateKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - // Encode in PEM format. 
- pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode private key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to write private key file %s: %s", filename, err) - } - - return nil -} - -// SavePublicKey saves the given public key to the file. -func SavePublicKey(filename string, key PublicKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode public key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode public key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write public key file %s: %s", filename, err) - } - - return nil -} - -// Public Key Set files - -type jwkSet struct { - Keys []json.RawMessage `json:"keys"` -} - -// LoadKeySetFile loads a key set -func LoadKeySetFile(filename string) ([]PublicKey, error) { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return loadJSONKeySetFile(filename) - } - - // Must be a PEM format file - return loadPEMKeySetFile(filename) -} - -func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { - if len(data) == 0 { - // This is okay, just return an empty slice. - return []json.RawMessage{}, nil - } - - keySet := jwkSet{} - - err := json.Unmarshal(data, &keySet) - if err != nil { - return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) - } - - return keySet.Keys, nil -} - -func loadJSONKeySetFile(filename string) ([]PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyJWKSet(contents) -} - -func loadPEMKeySetFile(filename string) ([]PublicKey, error) { - data, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyPEMBundle(data) -} - -// AddKeySetFile adds a key to a key set -func AddKeySetFile(filename string, key PublicKey) error { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return addKeySetJSONFile(filename, key) - } - - // Must be a PEM format file - return addKeySetPEMFile(filename, key) -} - -func addKeySetJSONFile(filename string, key PublicKey) error { - encodedKey, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("unable to encode trusted client key: %s", err) - } - - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return err - } - - rawEntries, err := loadJSONKeySetRaw(contents) - if err != nil { - return err - } - - rawEntries = append(rawEntries, json.RawMessage(encodedKey)) - entriesWrapper := jwkSet{Keys: rawEntries} - - encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") - if err != nil { - return fmt.Errorf("unable to encode trusted client keys: %s", err) - } - - err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) - } - - return 
nil -} - -func addKeySetPEMFile(filename string, key PublicKey) error { - // Encode to PEM, open file for appending, write PEM. - file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) - } - defer file.Close() - - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encoded trusted key: %s", err) - } - - _, err = file.Write(pem.EncodeToMemory(pemBlock)) - if err != nil { - return fmt.Errorf("unable to write trusted keys file: %s", err) - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go deleted file mode 100644 index 57e691f2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package libtrust - -import ( - "errors" - "io/ioutil" - "os" - "testing" -) - -func makeTempFile(t *testing.T, prefix string) (filename string) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - t.Fatal(err) - } - - filename = file.Name() - file.Close() - - return -} - -func TestKeyFiles(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) - - key, err = GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) -} - -func testKeyFiles(t *testing.T, key PrivateKey) { - var err error - - privateKeyFilename := makeTempFile(t, "private_key") - privateKeyFilenamePEM := privateKeyFilename + ".pem" - privateKeyFilenameJWK := privateKeyFilename + ".jwk" - - publicKeyFilename := makeTempFile(t, "public_key") - publicKeyFilenamePEM := publicKeyFilename + ".pem" - publicKeyFilenameJWK := publicKeyFilename + ".jwk" - - if err = SaveKey(privateKeyFilenamePEM, key); err != nil { - t.Fatal(err) - } - - if err = SaveKey(privateKeyFilenameJWK, key); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil { - t.Fatal(err) - } - - loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - if key.KeyID() != loadedPEMKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedPEMPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - os.Remove(privateKeyFilename) - os.Remove(privateKeyFilenamePEM) - os.Remove(privateKeyFilenameJWK) - os.Remove(publicKeyFilename) - os.Remove(publicKeyFilenamePEM) - os.Remove(publicKeyFilenameJWK) -} - -func TestTrustedHostKeysFile(t *testing.T) { - trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys") - trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem" - trustedHostKeysFilenameJWK := 
trustedHostKeysFilename + ".json" - - testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM) - testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK) - - os.Remove(trustedHostKeysFilename) - os.Remove(trustedHostKeysFilenamePEM) - os.Remove(trustedHostKeysFilenameJWK) -} - -func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { - hostAddress1 := "docker.example.com:2376" - hostKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey1.AddExtendedField("hosts", []string{hostAddress1}) - err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %d\n", addr) - t.Logf("Host Key: %s\n\n", hostKey) - } - - hostAddress2 := "192.168.59.103:2376" - hostKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey2.AddExtendedField("hosts", hostAddress2) - err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %d\n", addr) - t.Logf("Host Key: %s\n\n", hostKey) - } - -} - -func TestTrustedClientKeysFile(t *testing.T) { - trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys") - trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem" - trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json" - - testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM) - testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK) - - os.Remove(trustedClientKeysFilename) - os.Remove(trustedClientKeysFilenamePEM) - os.Remove(trustedClientKeysFilenameJWK) -} - -func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) { - clientKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } - - clientKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go deleted file mode 100644 index 9a98ae35..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "sync" -) - -// ClientKeyManager manages client keys on the filesystem -type ClientKeyManager struct { - key PrivateKey - clientFile string - clientDir string - - clientLock sync.RWMutex - clients 
[]PublicKey - - configLock sync.Mutex - configs []*tls.Config -} - -// NewClientKeyManager loads a new manager from a set of key files -// and managed by the given private key. -func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { - m := &ClientKeyManager{ - key: trustKey, - clientFile: clientFile, - clientDir: clientDir, - } - if err := m.loadKeys(); err != nil { - return nil, err - } - // TODO Start watching file and directory - - return m, nil -} - -func (c *ClientKeyManager) loadKeys() (err error) { - // Load authorized keys file - var clients []PublicKey - if c.clientFile != "" { - clients, err = LoadKeySetFile(c.clientFile) - if err != nil { - return fmt.Errorf("unable to load authorized keys: %s", err) - } - } - - // Add clients from authorized keys directory - files, err := ioutil.ReadDir(c.clientDir) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to open authorized keys directory: %s", err) - } - for _, f := range files { - if !f.IsDir() { - publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) - if err != nil { - return fmt.Errorf("unable to load authorized key file: %s", err) - } - clients = append(clients, publicKey) - } - } - - c.clientLock.Lock() - c.clients = clients - c.clientLock.Unlock() - - return nil -} - -// RegisterTLSConfig registers a tls configuration to manager -// such that any changes to the keys may be reflected in -// the tls client CA pool -func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { - c.clientLock.RLock() - certPool, err := GenerateCACertPool(c.key, c.clients) - if err != nil { - return fmt.Errorf("CA pool generation error: %s", err) - } - c.clientLock.RUnlock() - - tlsConfig.ClientCAs = certPool - - c.configLock.Lock() - c.configs = append(c.configs, tlsConfig) - c.configLock.Unlock() - - return nil -} - -// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for -// libtrust identity authentication for the domain specified -func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - if err := clients.RegisterTLSConfig(tlsConfig); err != nil { - return nil, err - } - - // Generate cert - ips, domains, err := parseAddr(addr) - if err != nil { - return nil, err - } - // add domain that it expects clients to use - domains = append(domains, domain) - x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - return tlsConfig, nil -} - -// NewCertAuthTLSConfig creates a tls.Config for the server to use for -// certificate authentication -func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Verify client certificates against a CA? 
- if caPath != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tlsConfig, nil -} - -func newTLSConfig() *tls.Config { - return &tls.Config{ - NextProtos: []string{"http/1.1"}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } -} - -// parseAddr parses an address into an array of IPs and domains -func parseAddr(addr string) ([]net.IP, []string, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, nil, err - } - var domains []string - var ips []net.IP - ip := net.ParseIP(host) - if ip != nil { - ips = []net.IP{ip} - } else { - domains = []string{host} - } - return ips, domains, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go deleted file mode 100644 index f6c59cc4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package libtrust - -import ( - "testing" -) - -type generateFunc func() (PrivateKey, error) - -func runGenerateBench(b *testing.B, f generateFunc, name string) { - for i := 0; i < b.N; i++ { - _, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - } -} - -func runFingerprintBench(b *testing.B, f generateFunc, name string) { - b.StopTimer() - // Don't count this relatively slow generation call. - key, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - if key.KeyID() == "" { - b.Fatalf("Error generating key ID for %s", name) - } - } -} - -func BenchmarkECP256Generate(b *testing.B) { - runGenerateBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Generate(b *testing.B) { - runGenerateBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Generate(b *testing.B) { - runGenerateBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} - -func BenchmarkECP256Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go deleted file mode 100644 
index dac4cacf..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go
+++ /dev/null
@@ -1,427 +0,0 @@
-package libtrust
-
-import (
-	"crypto"
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/json"
-	"encoding/pem"
-	"errors"
-	"fmt"
-	"io"
-	"math/big"
-)
-
-/*
- * RSA DSA PUBLIC KEY
- */
-
-// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
-type rsaPublicKey struct {
-	*rsa.PublicKey
-	extended map[string]interface{}
-}
-
-func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
-	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
-}
-
-// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
-func (k *rsaPublicKey) KeyType() string {
-	return "RSA"
-}
-
-// KeyID returns a distinct identifier which is unique to this Public Key.
-func (k *rsaPublicKey) KeyID() string {
-	return keyIDFromCryptoKey(k)
-}
-
-func (k *rsaPublicKey) String() string {
-	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
-}
-
-// Verify verifies the signature of the data in the io.Reader using this Public Key.
-// The alg parameter should be the name of the JWA digital signature algorithm
-// which was used to produce the signature and should be supported by this
-// public key. Returns a nil error if the signature is valid.
-func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
-	// Verify the signature of the given data, return a nil error if valid.
-	sigAlg, err := rsaSignatureAlgorithmByName(alg)
-	if err != nil {
-		return fmt.Errorf("unable to verify Signature: %s", err)
-	}
-
-	hasher := sigAlg.HashID().New()
-	_, err = io.Copy(hasher, data)
-	if err != nil {
-		return fmt.Errorf("error reading data to sign: %s", err)
-	}
-	hash := hasher.Sum(nil)
-
-	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
-	if err != nil {
-		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
-	}
-
-	return nil
-}
-
-// CryptoPublicKey returns the internal object which can be used as a
-// crypto.PublicKey for use with other standard library operations. The type
-// is either *rsa.PublicKey or *ecdsa.PublicKey
-func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
-	return k.PublicKey
-}
-
-func (k *rsaPublicKey) toMap() map[string]interface{} {
-	jwk := make(map[string]interface{})
-	for k, v := range k.extended {
-		jwk[k] = v
-	}
-	jwk["kty"] = k.KeyType()
-	jwk["kid"] = k.KeyID()
-	jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
-	jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
-
-	return jwk
-}
-
-// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
-// RSA keys.
-func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
-	return json.Marshal(k.toMap())
-}
-
-// PEMBlock serializes this Public Key to DER-encoded PKIX format.
-func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
-	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
-	if err != nil {
-		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
-	}
-	k.extended["kid"] = k.KeyID() // For display purposes.
- return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *rsaPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract 'n', 'e', and 'kid' and check for - // consistency. - - // Get the modulus parameter N. - nB64Url, err := stringFromMap(jwk, "n") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - n, err := parseRSAModulusParam(nB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - // Get the public exponent E. - eB64Url, err := stringFromMap(jwk, "e") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - e, err := parseRSAPublicExponentParam(eB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - key := &rsaPublicKey{ - PublicKey: &rsa.PublicKey{N: n, E: e}, - } - - // Key ID is optional, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) - } - } - - if _, ok := jwk["d"]; ok { - return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") - } - - key.extended = jwk - - return key, nil -} - -/* - * RSA DSA PRIVATE KEY - */ - -// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. -type rsaPrivateKey struct { - rsaPublicKey - *rsa.PrivateKey -} - -func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { - return &rsaPrivateKey{ - *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), - cryptoPrivateKey, - } -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *rsaPrivateKey) PublicKey() PublicKey { - return &k.rsaPublicKey -} - -func (k *rsaPrivateKey) String() string { - return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the RSA private key. If the specified hashing algorithm is supported by -// this key, that hash function is used to generate the signature otherwise the -// the default hashing algorithm for this key is used. Returns the signature -// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", -// "RS512". -func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) - hasher := sigAlg.HashID().New() - - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - - alg = sigAlg.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. 
The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *rsaPrivateKey) toMap() map[string]interface{} { - k.Precompute() // Make sure the precomputed values are stored. - jwk := k.rsaPublicKey.toMap() - - jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) - jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) - jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) - jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) - jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) - jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) - - otherPrimes := k.Primes[2:] - - if len(otherPrimes) > 0 { - otherPrimesInfo := make([]interface{}, len(otherPrimes)) - for i, r := range otherPrimes { - otherPrimeInfo := make(map[string]string, 3) - otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) - crtVal := k.Precomputed.CRTValues[i] - otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) - otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) - otherPrimesInfo[i] = otherPrimeInfo - } - jwk["oth"] = otherPrimesInfo - } - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) -} - -func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { - // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that - // only the private key exponent 'd' is REQUIRED, the others are just for - // signature/decryption optimizations and SHOULD be included when the JWK - // is produced. We MAY choose to accept a JWK which only includes 'd', but - // we're going to go ahead and not choose to accept it without the extra - // fields. Only the 'oth' field will be optional (for multi-prime keys). - privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) - } - firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - var oth interface{} - if _, ok := jwk["oth"]; ok { - oth = jwk["oth"] - delete(jwk, "oth") - } - - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract the public key information, then extract the private - // key values. 
- publicKey, err := rsaPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - privateKey := &rsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: privateExponent, - Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, - Precomputed: rsa.PrecomputedValues{ - Dp: firstFactorCRT, - Dq: secondFactorCRT, - Qinv: crtCoeff, - }, - } - - if oth != nil { - // Should be an array of more JSON objects. - otherPrimesInfo, ok := oth.([]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") - } - numOtherPrimeFactors := len(otherPrimesInfo) - if numOtherPrimeFactors == 0 { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty") - } - otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) - productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) - crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) - - for i, val := range otherPrimesInfo { - otherPrimeinfo, ok := val.(map[string]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") - } - - otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - crtValue := &crtValues[i] - crtValue.Exp = otherFactorCRT - crtValue.Coeff = otherCrtCoeff - crtValue.R = productOfPrimes - otherPrimeFactors[i] = otherPrimeFactor - productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) - } - - privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) - privateKey.Precomputed.CRTValues = crtValues - } - - key := &rsaPrivateKey{ - rsaPublicKey: *publicKey, - PrivateKey: privateKey, - } - - return key, nil -} - -/* - * Key Generation Functions. - */ - -func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { - k = new(rsaPrivateKey) - k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - - k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. -func GenerateRSA2048PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(2048) - if err != nil { - return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. -func GenerateRSA3072PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(3072) - if err != nil { - return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
-func GenerateRSA4096PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(4096) - if err != nil { - return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) - } - - return k, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go deleted file mode 100644 index 5ec7707a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "log" - "testing" -) - -var rsaKeys []PrivateKey - -func init() { - var err error - rsaKeys, err = generateRSATestKeys() - if err != nil { - log.Fatal(err) - } -} - -func generateRSATestKeys() (keys []PrivateKey, err error) { - log.Println("Generating RSA 2048-bit Test Key") - rsa2048Key, err := GenerateRSA2048PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 3072-bit Test Key") - rsa3072Key, err := GenerateRSA3072PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 4096-bit Test Key") - rsa4096Key, err := GenerateRSA4096PrivateKey() - if err != nil { - return - } - - log.Println("Done generating RSA Test Keys!") - keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} - - return -} - -func TestRSAKeys(t *testing.T) { - for _, rsaKey := range rsaKeys { - if rsaKey.KeyType() != "RSA" { - t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) - } - } -} - -func TestRSASignVerify(t *testing.T) { - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = rsaKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalRSAKeys(t *testing.T) { - data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - - // It's a good idea to validate the Private Key to make sure our - // (un)marshal process didn't corrupt the extra parameters. 
- k := privKey2.(*rsaPrivateKey) - err = k.PrivateKey.Validate() - if err != nil { - t.Fatal(err) - } - } -} - -func TestFromCryptoRSAKeys(t *testing.T) { - for _, rsaKey := range rsaKeys { - cryptoPrivateKey := rsaKey.CryptoPrivateKey() - cryptoPublicKey := rsaKey.CryptoPublicKey() - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - t.Fatal(err) - } - - if pubKey.KeyID() != rsaKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - - privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) - if err != nil { - t.Fatal(err) - } - - if privKey.KeyID() != rsaKey.KeyID() { - t.Fatal("private key key ID mismatch") - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go deleted file mode 100644 index 89debf6b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go +++ /dev/null @@ -1,94 +0,0 @@ -package testutil - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "math/big" - "time" -) - -// GenerateTrustCA generates a new certificate authority for testing. -func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "CA Root", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} - -// GenerateIntermediate generates an intermediate certificate for testing using -// the parent certificate (likely a CA) and the provided keys. -func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "Intermediate", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -}
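These helpers compose into a full test chain: a self-signed root from `GenerateTrustCA`, zero or more intermediates from `GenerateIntermediate`, and a signature-only leaf from `GenerateTrustCert` (defined next). A minimal sketch of that composition, mirroring what `generateTrustChain` in `statement_test.go` later in this patch does with EC keys (the program layout is illustrative):

```go
package main

import (
	"log"

	"github.com/docker/libtrust"
	"github.com/docker/libtrust/testutil"
)

func main() {
	// Self-signed root CA.
	caKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
	if err != nil {
		log.Fatal(err)
	}
	// Intermediate signed by the root.
	intKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	intermediate, err := testutil.GenerateIntermediate(intKey.CryptoPublicKey(), caKey.CryptoPrivateKey(), ca)
	if err != nil {
		log.Fatal(err)
	}
	// Signature-only leaf signed by the intermediate.
	leafKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	leaf, err := testutil.GenerateTrustCert(leafKey.CryptoPublicKey(), intKey.CryptoPrivateKey(), intermediate)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("chain: %s -> %s -> %s", ca.Subject.CommonName, intermediate.Subject.CommonName, leaf.Subject.CommonName)
}
```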
- -// GenerateTrustCert generates a new trust certificate for testing. Unlike the -// intermediate certificates, this certificate should be used for signature -// only, not creating certificates. -func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "Trust Cert", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageDigitalSignature, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md deleted file mode 100644 index 24124db2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md +++ /dev/null @@ -1,50 +0,0 @@ -## Libtrust TLS Config Demo - -This program generates key pairs and trust files for a TLS client and server. - -To generate the keys, run: - -``` -$ go run genkeys.go -``` - -The generated files are: - -``` -$ ls -l client_data/ server_data/ -client_data/: -total 24 --rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json --rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json --rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json - -server_data/: -total 24 --rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json --rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json --rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json -``` - -The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, and in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `<host>:<port>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client. - -To start the server, run: - -``` -$ go run server.go -``` - -This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message. - -To make a request using the client, run: - -``` -$ go run client.go -``` - -This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
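The client consumes `trusted_hosts.json` through the libtrust helpers rather than by parsing the JSON itself. A minimal sketch of that lookup, assuming the same `LoadKeySetFile`/`FilterByHosts` calls that `client.go` below makes (the file path and host are the demo's own):

```go
package main

import (
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Load every host key the client trusts...
	hostKeys, err := libtrust.LoadKeySetFile("client_data/trusted_hosts.json")
	if err != nil {
		log.Fatal(err)
	}
	// ...then keep only the keys registered for this host (the final
	// boolean argument mirrors the call in client.go).
	serverKeys, err := libtrust.FilterByHosts(hostKeys, "localhost", false)
	if err != nil {
		log.Fatalf("no trusted key for host: %s", err)
	}
	log.Printf("trusted keys for localhost: %d", len(serverKeys))
}
```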
- -The file `gencert.go` can be used to generate a PEM-encoded version of the client key and certificate. If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running). - -``` -curl --cert cert.pem --key key.pem -k https://localhost:8888 -``` diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go deleted file mode 100644 index 0a699a0e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - privateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - // Load Client Key. - clientKey, err := libtrust.LoadKeyFile(privateKeyFilename) - if err != nil { - log.Fatal(err) - } - - // Generate Client Certificate. - selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey) - if err != nil { - log.Fatal(err) - } - - // Load trusted host keys. - hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - // Ensure the host we want to connect to is trusted! - host, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false) - if err != nil { - log.Fatalf("%q is not a known and trusted host", host) - } - - // Generate a CA pool with the trusted host's key. - caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys) - if err != nil { - log.Fatal(err) - } - - // Create HTTP Client. - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{ - tls.Certificate{ - Certificate: [][]byte{selfSignedClientCert.Raw}, - PrivateKey: clientKey.CryptoPrivateKey(), - Leaf: selfSignedClientCert, - }, - }, - RootCAs: caPool, - }, - }, - } - - var makeRequest = func(url string) { - resp, err := client.Get(url) - if err != nil { - log.Fatal(err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Fatal(err) - } - - log.Println(resp.Status) - log.Println(string(body)) - } - - // Make the request to the trusted server!
- makeRequest(fmt.Sprintf("https://%s", serverAddress)) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go deleted file mode 100644 index c65f3b6b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "encoding/pem" - "fmt" - "log" - "net" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - clientPrivateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) - if err != nil { - log.Fatal(err) - } - - keyPEMBlock, err := key.PEMBlock() - if err != nil { - log.Fatal(err) - } - - encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) - fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) - - cert, err := libtrust.GenerateSelfSignedClientCert(key) - if err != nil { - log.Fatal(err) - } - - encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) - fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) - - trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - hostname, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - - trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) - if err != nil { - log.Fatal(err) - } - - caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) - if err != nil { - log.Fatal(err) - } - - encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) - fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go deleted file mode 100644 index 9dc8842a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "log" - - "github.com/docker/libtrust" -) - -func main() { - // Generate client key. - clientKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Add a comment for the client key. - clientKey.AddExtendedField("comment", "TLS Demo Client") - - // Save the client key, public and private versions. - err = libtrust.SaveKey("client_data/private_key.pem", clientKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate server key. - serverKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Set the list of addresses to use for the server. - serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) - - // Save the server key, public and private versions. - err = libtrust.SaveKey("server_data/private_key.pem", serverKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Authorized Keys file for server. 
- err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Known Host Keys file for client. - err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey()) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go deleted file mode 100644 index d3cb2ea9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go +++ /dev/null @@ -1,80 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "html" - "log" - "net" - "net/http" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - privateKeyFilename = "server_data/private_key.pem" - authorizedClientsFilename = "server_data/trusted_clients.pem" -) - -func requestHandler(w http.ResponseWriter, r *http.Request) { - clientCert := r.TLS.PeerCertificates[0] - keyID := clientCert.Subject.CommonName - log.Printf("Request from keyID: %s\n", keyID) - fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID)) -} - -func main() { - // Load server key. - serverKey, err := libtrust.LoadKeyFile(privateKeyFilename) - if err != nil { - log.Fatal(err) - } - - // Generate server certificate. - selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert( - serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}, - ) - if err != nil { - log.Fatal(err) - } - - // Load authorized client keys. - authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename) - if err != nil { - log.Fatal(err) - } - - // Create CA pool using trusted client keys. - caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients) - if err != nil { - log.Fatal(err) - } - - // Create TLS config, requiring client certificates. - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{ - tls.Certificate{ - Certificate: [][]byte{selfSignedServerCert.Raw}, - PrivateKey: serverKey.CryptoPrivateKey(), - Leaf: selfSignedServerCert, - }, - }, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: caPool, - } - - // Create HTTP server with simple request handler. - server := &http.Server{ - Addr: serverAddress, - Handler: http.HandlerFunc(requestHandler), - } - - // Listen and serve HTTPS using the libtrust TLS config. - listener, err := net.Listen("tcp", server.Addr) - if err != nil { - log.Fatal(err) - } - tlsListener := tls.NewListener(listener, tlsConfig) - server.Serve(tlsListener) -}
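The trust-graph files that follow encode permissions as a `uint16` bit map; the values are documented in a comment at the end of `graph.go` below. A small illustration of how those bits compose, with constant names taken from that comment (the package itself only lists the raw values, so the names here are illustrative), using the same check as `memoryGraph.walkGrants`:

```go
package main

import "fmt"

// Permission bits as documented in graph.go below.
const (
	ReadItem    uint16 = 0x01 // read the node itself
	WriteItem   uint16 = 0x03 // write (implies read) on the node
	ReadAccess  uint16 = 0x07 // read the node and delegate read to the subtree
	WriteAccess uint16 = 0x0F // full read/write on the node and subtree
)

// covers mirrors memoryGraph.walkGrants: a grant satisfies a request
// only when every requested bit is present in the granted bit map.
func covers(granted, requested uint16) bool {
	return granted&requested == requested
}

func main() {
	fmt.Println(covers(WriteAccess, ReadItem)) // true: 0x0F contains 0x01
	fmt.Println(covers(ReadItem, WriteItem))   // false: 0x01 lacks the write bit
}
```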
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go deleted file mode 100644 index 72b0fc36..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go +++ /dev/null @@ -1,50 +0,0 @@ -package trustgraph - -import "github.com/docker/libtrust" - -// TrustGraph represents a graph of authorization mapping -// public keys to nodes and grants between nodes. -type TrustGraph interface { - // Verifies that the given public key is allowed to perform - // the given action on the given node according to the trust - // graph. - Verify(libtrust.PublicKey, string, uint16) (bool, error) - - // GetGrants returns an array of all grant chains which are used to - // allow the requested permission. - GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error) -} - -// Grant represents a transfer of permission from one part of the -// trust graph to another. This is the only way to delegate -// permission between two different sub trees in the graph. -type Grant struct { - // Subject is the namespace being granted - Subject string - - // Permission is a bit map of permissions - Permission uint16 - - // Grantee represents the node being granted - // a permission scope. The grantee can be - // either a namespace item or a key ID where namespace - // items will always start with a '/'. - Grantee string - - // statement represents the statement used to create - // this object. - statement *Statement -} - -// Permissions -// Read node 0x01 (can read node, no sub nodes) -// Write node 0x02 (can write to node object, cannot create subnodes) -// Read subtree 0x04 (delegates read to each sub node) -// Write subtree 0x08 (delegates write to each sub node, including create on the subject) -// -// Permission shortcuts -// ReadItem = 0x01 -// WriteItem = 0x03 -// ReadAccess = 0x07 -// WriteAccess = 0x0F -// Delegate = 0x0F diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go deleted file mode 100644 index 247bfa7a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go +++ /dev/null @@ -1,133 +0,0 @@ -package trustgraph - -import ( - "strings" - - "github.com/docker/libtrust" -) - -type grantNode struct { - grants []*Grant - children map[string]*grantNode -} - -type memoryGraph struct { - roots map[string]*grantNode -} - -func newGrantNode() *grantNode { - return &grantNode{ - grants: []*Grant{}, - children: map[string]*grantNode{}, - } -} - -// NewMemoryGraph returns a new in-memory trust graph created from -// a static list of grants. This graph is immutable after creation -// and any alterations should create a new instance.
-func NewMemoryGraph(grants []*Grant) TrustGraph { - roots := map[string]*grantNode{} - for _, grant := range grants { - parts := strings.Split(grant.Grantee, "/") - nodes := roots - var node *grantNode - var nodeOk bool - for _, part := range parts { - node, nodeOk = nodes[part] - if !nodeOk { - node = newGrantNode() - nodes[part] = node - } - if part != "" { - node.grants = append(node.grants, grant) - } - nodes = node.children - } - } - return &memoryGraph{roots} -} - -func (g *memoryGraph) getGrants(name string) []*Grant { - nameParts := strings.Split(name, "/") - nodes := g.roots - var node *grantNode - var nodeOk bool - for _, part := range nameParts { - node, nodeOk = nodes[part] - if !nodeOk { - return nil - } - nodes = node.children - } - return node.grants -} - -func isSubName(name, sub string) bool { - if strings.HasPrefix(name, sub) { - if len(name) == len(sub) || name[len(sub)] == '/' { - return true - } - } - return false -} - -type walkFunc func(*Grant, []*Grant) bool - -func foundWalkFunc(*Grant, []*Grant) bool { - return true -} - -func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool { - if visited == nil { - visited = map[*Grant]bool{} - } - grants := g.getGrants(start) - subGrants := make([]*Grant, 0, len(grants)) - for _, grant := range grants { - if visited[grant] { - continue - } - visited[grant] = true - if grant.Permission&permission == permission { - if isSubName(target, grant.Subject) { - if f(grant, chain) { - return true - } - } else { - subGrants = append(subGrants, grant) - } - } - } - for _, grant := range subGrants { - var chainCopy []*Grant - if collect { - chainCopy = make([]*Grant, len(chain)+1) - copy(chainCopy, chain) - chainCopy[len(chainCopy)-1] = grant - } else { - chainCopy = nil - } - - if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { - return true - } - } - return false -} - -func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { - return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil -} - -func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { - grants := [][]*Grant{} - collect := func(grant *Grant, chain []*Grant) bool { - grantChain := make([]*Grant, len(chain)+1) - copy(grantChain, chain) - grantChain[len(grantChain)-1] = grant - grants = append(grants, grantChain) - return false - } - g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) - return grants, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go deleted file mode 100644 index 49fd0f3b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package trustgraph - -import ( - "fmt" - "testing" - - "github.com/docker/libtrust" -) - -func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) { - grants := make([]*Grant, count) - keys := make([]libtrust.PrivateKey, count) - for i := 0; i < count; i++ { - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) - } - grant := &Grant{ - Subject: fmt.Sprintf("/user-%d", i+1), - Permission: 0x0f, - Grantee: pk.KeyID(), - 
} - keys[i] = pk - grants[i] = grant - } - return grants, keys -} - -func testVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { - if ok, err := g.Verify(k, target, permission); err != nil { - t.Fatalf("Unexpected error during verification: %s", err) - } else if !ok { - t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) - } -} - -func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { - if ok, err := g.Verify(k, target, permission); err != nil { - t.Fatalf("Unexpected error during verification: %s", err) - } else if ok { - t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) - } -} - -func TestVerify(t *testing.T) { - grants, keys := createTestKeysAndGrants(4) - extraGrants := make([]*Grant, 3) - extraGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - extraGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - extraGrants[2] = &Grant{ - Subject: "/user-4", - Permission: 0x07, - Grantee: "/user-1", - } - grants = append(grants, extraGrants...) - - g := NewMemoryGraph(grants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f) - testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f) - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f) - testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f) - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) -} - -func TestCircularWalk(t *testing.T) { - grants, keys := createTestKeysAndGrants(3) - user1Grant := &Grant{ - Subject: "/user-2", - Permission: 0x0f, - Grantee: "/user-1", - } - user2Grant := &Grant{ - Subject: "/user-1", - Permission: 0x0f, - Grantee: "/user-2", - } - grants = append(grants, user1Grant, user2Grant) - - g := NewMemoryGraph(grants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f) - testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f) - testNotVerified(t, g, 
keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) -} - -func assertGrantSame(t *testing.T, actual, expected *Grant) { - if actual != expected { - t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual) - } -} - -func TestGetGrants(t *testing.T) { - grants, keys := createTestKeysAndGrants(5) - extraGrants := make([]*Grant, 4) - extraGrants[0] = &Grant{ - Subject: "/user-3/friend-project", - Permission: 0x0f, - Grantee: "/user-2/friends", - } - extraGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - extraGrants[2] = &Grant{ - Subject: "/user-2/friends", - Permission: 0x0f, - Grantee: "/user-5/fun-project", - } - extraGrants[3] = &Grant{ - Subject: "/user-5/fun-project", - Permission: 0x0f, - Grantee: "/user-1", - } - grants = append(grants, extraGrants...) - - g := NewMemoryGraph(grants) - - grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f) - if err != nil { - t.Fatalf("Error getting grants: %s", err) - } - if len(grantChains) != 1 { - t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) - } - if len(grantChains[0]) != 2 { - t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) - } - assertGrantSame(t, grantChains[0][0], grants[3]) - assertGrantSame(t, grantChains[0][1], extraGrants[1]) - - grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f) - if err != nil { - t.Fatalf("Error getting grants: %s", err) - } - if len(grantChains) != 1 { - t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) - } - if len(grantChains[0]) != 4 { - t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) - } - assertGrantSame(t, grantChains[0][0], grants[0]) - assertGrantSame(t, grantChains[0][1], extraGrants[3]) - assertGrantSame(t, grantChains[0][2], extraGrants[2]) - assertGrantSame(t, grantChains[0][3], extraGrants[0]) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go deleted file mode 100644 index 7a74b553..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go +++ /dev/null @@ -1,227 +0,0 @@ -package trustgraph - -import ( - "crypto/x509" - "encoding/json" - "io" - "io/ioutil" - "sort" - "strings" - "time" - - "github.com/docker/libtrust" -) - -type jsonGrant struct { - Subject string `json:"subject"` - Permission uint16 `json:"permission"` - Grantee string `json:"grantee"` -} - -type jsonRevocation struct { - Subject string `json:"subject"` - Revocation uint16 `json:"revocation"` - Grantee string `json:"grantee"` -} - -type jsonStatement struct { - Revocations []*jsonRevocation `json:"revocations"` - Grants []*jsonGrant `json:"grants"` - Expiration time.Time `json:"expiration"` - IssuedAt time.Time `json:"issuedAt"` -} - -func (g *jsonGrant) Grant(statement *Statement) *Grant { - return &Grant{ - Subject: g.Subject, - Permission: g.Permission, - Grantee: g.Grantee, - statement: statement, - } -} - -// Statement represents a set of grants made from a verifiable -// authority. A statement has an expiration associated with it -// set by the authority. 
-type Statement struct { - jsonStatement - - signature *libtrust.JSONSignature -} - -// IsExpired returns whether the statement has expired. -func (s *Statement) IsExpired() bool { - return s.Expiration.Before(time.Now().Add(-10 * time.Second)) -} - -// Bytes returns an indented JSON representation of the statement -// in a byte array. This value can be written to a file or stream -// without alteration. -func (s *Statement) Bytes() ([]byte, error) { - return s.signature.PrettySignature("signatures") -} - -// LoadStatement loads and verifies a statement from an input stream. -func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - js, err := libtrust.ParsePrettySignature(b, "signatures") - if err != nil { - return nil, err - } - payload, err := js.Payload() - if err != nil { - return nil, err - } - var statement Statement - err = json.Unmarshal(payload, &statement.jsonStatement) - if err != nil { - return nil, err - } - - if authority == nil { - _, err = js.Verify() - if err != nil { - return nil, err - } - } else { - _, err = js.VerifyChains(authority) - if err != nil { - return nil, err - } - } - statement.signature = js - - return &statement, nil -} - -// CreateStatement creates and signs a statement from a stream of grants -// and revocations in a JSON array. -func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { - var statement Statement - err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants) - if err != nil { - return nil, err - } - err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations) - if err != nil { - return nil, err - } - statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration) - statement.jsonStatement.IssuedAt = time.Now().UTC() - - b, err := json.MarshalIndent(&statement.jsonStatement, "", " ") - if err != nil { - return nil, err - } - - statement.signature, err = libtrust.NewJSONSignature(b) - if err != nil { - return nil, err - } - err = statement.signature.SignWithChain(key, chain) - if err != nil { - return nil, err - } - - return &statement, nil -} - -type statementList []*Statement - -func (s statementList) Len() int { - return len(s) -} - -func (s statementList) Less(i, j int) bool { - return s[i].IssuedAt.Before(s[j].IssuedAt) -} - -func (s statementList) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// CollapseStatements returns a single list of the valid statements as well as the -// time when the next grant will expire.
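`CollapseStatements`, defined next, applies revocations by clearing bits with Go's AND NOT operator (`&^`). A one-line illustration using the values from `TestCollapseGrants` later in the patch, where a 0x08 revocation against a 0x0f grant leaves 0x07:

```go
package main

import "fmt"

func main() {
	var granted, revoked uint16 = 0x0f, 0x08
	// Mirrors grant.Permission = grant.Permission &^ revocation.Revocation:
	// every bit set in revoked is cleared from granted.
	remaining := granted &^ revoked
	fmt.Printf("%#x\n", remaining) // 0x7: subtree-write revoked, the rest retained
}
```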
-func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) { - sorted := make(statementList, 0, len(statements)) - for _, statement := range statements { - if useExpired || !statement.IsExpired() { - sorted = append(sorted, statement) - } - } - sort.Sort(sorted) - - var minExpired time.Time - var grantCount int - roots := map[string]*grantNode{} - for i, statement := range sorted { - if statement.Expiration.Before(minExpired) || i == 0 { - minExpired = statement.Expiration - } - for _, grant := range statement.Grants { - parts := strings.Split(grant.Grantee, "/") - nodes := roots - g := grant.Grant(statement) - grantCount = grantCount + 1 - - for _, part := range parts { - node, nodeOk := nodes[part] - if !nodeOk { - node = newGrantNode() - nodes[part] = node - } - node.grants = append(node.grants, g) - nodes = node.children - } - } - - for _, revocation := range statement.Revocations { - parts := strings.Split(revocation.Grantee, "/") - nodes := roots - - var node *grantNode - var nodeOk bool - for _, part := range parts { - node, nodeOk = nodes[part] - if !nodeOk { - break - } - nodes = node.children - } - if node != nil { - for _, grant := range node.grants { - if isSubName(grant.Subject, revocation.Subject) { - grant.Permission = grant.Permission &^ revocation.Revocation - } - } - } - } - } - - retGrants := make([]*Grant, 0, grantCount) - for _, rootNodes := range roots { - retGrants = append(retGrants, rootNodes.grants...) - } - - return retGrants, minExpired, nil -} - -// FilterStatements filters the statements to statements including the given grants. -func FilterStatements(grants []*Grant) ([]*Statement, error) { - statements := map[*Statement]bool{} - for _, grant := range grants { - if grant.statement != nil { - statements[grant.statement] = true - } - } - retStatements := make([]*Statement, len(statements)) - var i int - for statement := range statements { - retStatements[i] = statement - i++ - } - return retStatements, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go deleted file mode 100644 index e5094686..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go +++ /dev/null @@ -1,417 +0,0 @@ -package trustgraph - -import ( - "bytes" - "crypto/x509" - "encoding/json" - "testing" - "time" - - "github.com/docker/libtrust" - "github.com/docker/libtrust/testutil" -) - -const testStatementExpiration = time.Hour * 5 - -func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { - var statement Statement - - statement.Grants = make([]*jsonGrant, len(grants)) - for i, grant := range grants { - statement.Grants[i] = &jsonGrant{ - Subject: grant.Subject, - Permission: grant.Permission, - Grantee: grant.Grantee, - } - } - statement.IssuedAt = time.Now() - statement.Expiration = time.Now().Add(testStatementExpiration) - statement.Revocations = make([]*jsonRevocation, 0) - - marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ") - if err != nil { - return nil, err - } - - sig, err := libtrust.NewJSONSignature(marshalled) - if err != nil { - return nil, err - } - err = sig.SignWithChain(key, chain) - if err != nil { - return nil, err - } - statement.signature = sig - - return &statement, nil -} - -func 
generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) { - caKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - - parent := ca - parentKey := caKey - chain := make([]*x509.Certificate, chainLen) - for i := chainLen - 1; i > 0; i-- { - intermediatekey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating intermediate certificate: %s", err) - } - parent = chain[i] - parentKey = intermediatekey - } - trustKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating trust cert: %s", err) - } - - caPool := x509.NewCertPool() - caPool.AddCert(ca) - - return trustKey, caPool, chain -} - -func TestLoadStatement(t *testing.T) { - grantCount := 4 - grants, _ := createTestKeysAndGrants(grantCount) - - trustKey, caPool, chain := generateTrustChain(t, 6) - - statement, err := generateStatement(grants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - - statementBytes, err := statement.Bytes() - if err != nil { - t.Fatalf("Error getting statement bytes: %s", err) - } - - s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - if len(s2.Grants) != grantCount { - t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) - } - - pool := x509.NewCertPool() - _, err = LoadStatement(bytes.NewReader(statementBytes), pool) - if err == nil { - t.Fatalf("No error thrown verifying without an authority") - } else if _, ok := err.(x509.UnknownAuthorityError); !ok { - t.Fatalf("Unexpected error verifying without authority: %s", err) - } - - s2, err = LoadStatement(bytes.NewReader(statementBytes), nil) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - if len(s2.Grants) != grantCount { - t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) - } - - badData := make([]byte, len(statementBytes)) - copy(badData, statementBytes) - badData[0] = '[' - _, err = LoadStatement(bytes.NewReader(badData), nil) - if err == nil { - t.Fatalf("No error thrown parsing bad json") - } - - alteredData := make([]byte, len(statementBytes)) - copy(alteredData, statementBytes) - alteredData[30] = '0' - _, err = LoadStatement(bytes.NewReader(alteredData), nil) - if err == nil { - t.Fatalf("No error thrown from bad data") - } -} - -func TestCollapseGrants(t *testing.T) { - grantCount := 8 - grants, keys := createTestKeysAndGrants(grantCount) - linkGrants := make([]*Grant, 4) - linkGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - linkGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - linkGrants[2] = &Grant{ - Subject: "/user-6", - Permission: 0x0f, - Grantee: "/user-7", - } - linkGrants[3] = &Grant{ - Subject: "/user-6/sub-project/specific-app", -
Permission: 0x0f, - Grantee: "/user-5", - } - trustKey, pool, chain := generateTrustChain(t, 3) - - statements := make([]*Statement, 3) - var err error - statements[0], err = generateStatement(grants[0:4], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[1], err = generateStatement(grants[4:], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[2], err = generateStatement(linkGrants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - - statementsCopy := make([]*Statement, len(statements)) - for i, statement := range statements { - b, err := statement.Bytes() - if err != nil { - t.Fatalf("Error getting statement bytes: %s", err) - } - verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - // Force sort by reversing order - statementsCopy[len(statementsCopy)-i-1] = verifiedStatement - } - statements = statementsCopy - - collapsedGrants, expiration, err := CollapseStatements(statements, false) - if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) - } - if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { - t.Fatalf("Unexpected expiration time: %s", expiration.String()) - } - g := NewMemoryGraph(collapsedGrants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) - testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f) - testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f) - testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) - testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f) - - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f) - testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f) - - // Add revocation grant - statements = append(statements, &Statement{ - jsonStatement{ - IssuedAt: time.Now(), - Expiration: time.Now().Add(testStatementExpiration), - Grants: []*jsonGrant{}, - Revocations: []*jsonRevocation{ - &jsonRevocation{ - Subject: "/user-1", - Revocation: 0x0f, - Grantee: keys[0].KeyID(), - }, - &jsonRevocation{ - Subject: "/user-2", - Revocation: 0x08, - Grantee: keys[1].KeyID(), - }, - &jsonRevocation{ - Subject: "/user-6", - Revocation: 0x0f, - Grantee: "/user-7", - }, - &jsonRevocation{ - Subject: "/user-9", - Revocation: 0x0f, - Grantee: "/user-10", - }, - }, - }, - nil, - }) - - collapsedGrants, expiration, err = CollapseStatements(statements, false) - if len(collapsedGrants) != 12 { - 
t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) - } - if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { - t.Fatalf("Unexpected expiration time: %s", expiration.String()) - } - g = NewMemoryGraph(collapsedGrants) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) - - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07) -} - -func TestFilterStatements(t *testing.T) { - grantCount := 8 - grants, keys := createTestKeysAndGrants(grantCount) - linkGrants := make([]*Grant, 3) - linkGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - linkGrants[1] = &Grant{ - Subject: "/user-5", - Permission: 0x0f, - Grantee: "/user-4", - } - linkGrants[2] = &Grant{ - Subject: "/user-7", - Permission: 0x0f, - Grantee: "/user-6", - } - - trustKey, _, chain := generateTrustChain(t, 3) - - statements := make([]*Statement, 5) - var err error - statements[0], err = generateStatement(grants[0:2], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[1], err = generateStatement(grants[2:4], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[2], err = generateStatement(grants[4:6], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[3], err = generateStatement(grants[6:], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[4], err = generateStatement(linkGrants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - collapsed, _, err := CollapseStatements(statements, false) - if err != nil { - t.Fatalf("Error collapsing grants: %s", err) - } - - // Filter 1, all 5 statements - filter1, err := FilterStatements(collapsed) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter1) != 5 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1)) - } - - // Filter 2, one statement - filter2, err := FilterStatements([]*Grant{collapsed[0]}) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter2) != 1 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2)) - } - - // Filter 3, 2 statements, from graph lookup - g := NewMemoryGraph(collapsed) - lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f) - if err != nil { - t.Fatalf("Error looking up grants: %s", err) - } - if len(lookupGrants) != 1 { - t.Fatalf("Wrong numberof grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants)) - } - if len(lookupGrants[0]) != 2 { - t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants)) - } - filter3, err := FilterStatements(lookupGrants[0]) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter3) != 2 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3)) - } - -} - -func TestCreateStatement(t *testing.T) { - grantJSON := bytes.NewReader([]byte(`[ - { - "subject": "/user-2", - "permission": 15, - "grantee": "/user-1" - }, - { - "subject": "/user-7", - "permission": 1, - "grantee": "/user-9" - }, - { - "subject": "/user-3", - 
"permission": 15, - "grantee": "/user-2" - } -]`)) - revocationJSON := bytes.NewReader([]byte(`[ - { - "subject": "user-8", - "revocation": 12, - "grantee": "user-9" - } -]`)) - - trustKey, pool, chain := generateTrustChain(t, 3) - - statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain) - if err != nil { - t.Fatalf("Error creating statement: %s", err) - } - - b, err := statement.Bytes() - if err != nil { - t.Fatalf("Error retrieving bytes: %s", err) - } - - verified, err := LoadStatement(bytes.NewReader(b), pool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - - if len(verified.Grants) != 3 { - t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants)) - } - - if len(verified.Revocations) != 1 { - t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations)) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util.go deleted file mode 100644 index 45dc3e18..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util.go +++ /dev/null @@ -1,361 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/elliptic" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/url" - "os" - "path/filepath" - "strings" - "time" -) - -// LoadOrCreateTrustKey will load a PrivateKey from the specified path -func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { - if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { - return nil, err - } - - trustKey, err := LoadKeyFile(trustKeyPath) - if err == ErrKeyFileDoesNotExist { - trustKey, err = GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("error generating key: %s", err) - } - - if err := SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("error saving key file: %s", err) - } - - dir, file := filepath.Split(trustKeyPath) - if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { - return nil, fmt.Errorf("error saving public key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("error loading key file: %s", err) - } - return trustKey, nil -} - -// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity -// based authentication from the specified dockerUrl, the rootConfigPath and -// the server name to which it is connecting. -// If trustUnknownHosts is true it will automatically add the host to the -// known-hosts.json in rootConfigPath. 
-func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - trustKeyPath := filepath.Join(rootConfigPath, "key.json") - knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") - - u, err := url.Parse(dockerUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse machine url") - } - - if u.Scheme == "unix" { - return nil, nil - } - - addr := u.Host - proto := "tcp" - - trustKey, err := LoadOrCreateTrustKey(trustKeyPath) - if err != nil { - return nil, fmt.Errorf("unable to load trust key: %s", err) - } - - knownHosts, err := LoadKeySetFile(knownHostsPath) - if err != nil { - return nil, fmt.Errorf("could not load trusted hosts file: %s", err) - } - - allowedHosts, err := FilterByHosts(knownHosts, addr, false) - if err != nil { - return nil, fmt.Errorf("error filtering hosts: %s", err) - } - - certPool, err := GenerateCACertPool(trustKey, allowedHosts) - if err != nil { - return nil, fmt.Errorf("Could not create CA pool: %s", err) - } - - tlsConfig.ServerName = serverName - tlsConfig.RootCAs = certPool - - x509Cert, err := GenerateSelfSignedClientCert(trustKey) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - tlsConfig.InsecureSkipVerify = true - - testConn, err := tls.Dial(proto, addr, tlsConfig) - if err != nil { - return nil, fmt.Errorf("tls Handshake error: %s", err) - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: tlsConfig.ServerName, - Intermediates: x509.NewCertPool(), - } - - certs := testConn.ConnectionState().PeerCertificates - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - if _, err := certs[0].Verify(opts); err != nil { - if _, ok := err.(x509.UnknownAuthorityError); ok { - if trustUnknownHosts { - pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) - if err != nil { - return nil, fmt.Errorf("error extracting public key from cert: %s", err) - } - - pubKey.AddExtendedField("hosts", []string{addr}) - - if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { - return nil, fmt.Errorf("error adding machine to known hosts: %s", err) - } - } else { - return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) - } - } - } - - testConn.Close() - tlsConfig.InsecureSkipVerify = false - - return tlsConfig, nil -} - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification.
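-// For example (a sketch of the padding rule below): an input of length 4 such
-// as "AQAB" needs no padding, while an input of length 2 such as "AQ" is
-// padded to "AQ==" before decoding.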
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func keyIDEncode(b []byte) string { - s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") - var buf bytes.Buffer - var i int - for i = 0; i < len(s)/4-1; i++ { - start := i * 4 - end := start + 4 - buf.WriteString(s[start:end] + ":") - } - buf.WriteString(s[i*4:]) - return buf.String() -} - -func keyIDFromCryptoKey(pubKey PublicKey) string { - // Generate and return a 'libtrust' fingerprint of the public key. - // For an RSA key this should be: - // SHA256(DER encoded ASN1) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) - if err != nil { - return "" - } - hasher := crypto.SHA256.New() - hasher.Write(derBytes) - return keyIDEncode(hasher.Sum(nil)[:30]) -} - -func stringFromMap(m map[string]interface{}, key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("%q value not specified", key) - } - - str, ok := val.(string) - if !ok { - return "", fmt.Errorf("%q value must be a string", key) - } - delete(m, key) - - return str, nil -} - -func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { - curveByteLen := (curve.Params().BitSize + 7) >> 3 - - cBytes, err := joseBase64UrlDecode(cB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - cByteLength := len(cBytes) - if cByteLength != curveByteLen { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) - } - return new(big.Int).SetBytes(cBytes), nil -} - -func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { - dBytes, err := joseBase64UrlDecode(dB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := curve.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - dByteLength := len(dBytes) - - if dByteLength != octetLength { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) - } - - return new(big.Int).SetBytes(dBytes), nil -} - -func parseRSAModulusParam(nB64Url string) (*big.Int, error) { - nBytes, err := joseBase64UrlDecode(nB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(nBytes), nil -} - -func serializeRSAPublicExponentParam(e int) []byte { - // We MUST use the minimum number of octets to represent E. 
- // E is supposed to be 65537 for performance and security reasons - // and is what golang's rsa package generates, but it might be - // different if imported from some other generator. - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(e)) - var i int - for i = 0; i < 3; i++ { // scan at most the first three octets so at least one byte is returned - if buf[i] != 0 { - break - } - } - return buf[i:] -} - -func parseRSAPublicExponentParam(eB64Url string) (int, error) { - eBytes, err := joseBase64UrlDecode(eB64Url) - if err != nil { - return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - // Only the minimum number of bytes were used to represent E, but - // binary.BigEndian.Uint32 expects at least 4 bytes, so we need - // to add zero padding if necessary. - byteLen := len(eBytes) - buf := make([]byte, 4-byteLen, 4) - eBytes = append(buf, eBytes...) - - return int(binary.BigEndian.Uint32(eBytes)), nil -} - -func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { - b64Url, err := stringFromMap(m, key) - if err != nil { - return nil, err - } - - paramBytes, err := joseBase64UrlDecode(b64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(paramBytes), nil -} - -func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { - pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} - for k, v := range headers { - switch val := v.(type) { - case string: - pemBlock.Headers[k] = val - case []string: - if k == "hosts" { - pemBlock.Headers[k] = strings.Join(val, ",") - } else { - // Return error, non-encodable type - } - default: - // Return error, non-encodable type - } - } - - return pemBlock, nil -} - -func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { - cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) - } - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - return nil, err - } - - addPEMHeadersToKey(pemBlock, pubKey) - - return pubKey, nil -} - -func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { - for key, value := range pemBlock.Headers { - var safeVal interface{} - if key == "hosts" { - safeVal = strings.Split(value, ",") - } else { - safeVal = value - } - pubKey.AddExtendedField(key, safeVal) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go deleted file mode 100644 index ee54f5b8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "reflect" - "testing" -) - -func TestAddPEMHeadersToKey(t *testing.T) { - pk := &rsaPublicKey{nil, map[string]interface{}{}} - blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} - addPEMHeadersToKey(blk, pk) - - val := pk.GetExtendedField("hosts") - hosts, ok := val.([]string) - if !ok { - t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) - } - expected := []string{"localhost", "127.0.0.1"} - if !reflect.DeepEqual(hosts, expected) { - t.Errorf("hosts(%v), expected %v", hosts, expected) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go deleted file mode 100644 index ce78eff6..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package internal - -import ( - "strings" -) - -const ( - WatchState = 1 << iota - MultiState - SubscribeState - MonitorState -) - -type CommandInfo struct { - Set, Clear int -} - -var commandInfos = map[string]CommandInfo{ - "WATCH": {Set: WatchState}, - "UNWATCH": {Clear: WatchState}, - "MULTI": {Set: MultiState}, - "EXEC": {Clear: WatchState | MultiState}, - "DISCARD": {Clear: WatchState | MultiState}, - "PSUBSCRIBE": {Set: SubscribeState}, - "SUBSCRIBE": {Set: SubscribeState}, - "MONITOR": {Set: MonitorState}, -} - -func LookupCommandInfo(commandName string) CommandInfo { - return commandInfos[strings.ToUpper(commandName)] -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go deleted file mode 100644 index 5f955c42..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2014 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package redistest contains utilities for writing Redigo tests. -package redistest - -import ( - "errors" - "time" - - "github.com/garyburd/redigo/redis" -) - -type testConn struct { - redis.Conn -} - -func (t testConn) Close() error { - _, err := t.Conn.Do("SELECT", "9") - if err != nil { - return err - } - _, err = t.Conn.Do("FLUSHDB") - if err != nil { - return err - } - return t.Conn.Close() -} - -// Dial dials the local Redis server and selects database 9. To prevent -// stomping on real data, Dial fails if database 9 contains data. The -// returned connection flushes database 9 on close.
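-// A typical use in a test (sketch; assumes a local Redis server on :6379):
-//
-//	c, err := redistest.Dial()
-//	if err != nil {
-//		t.Fatalf("error connecting to database, %v", err)
-//	}
-//	defer c.Close() // flushes database 9 and closes the connection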
-func Dial() (redis.Conn, error) { - c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second) - if err != nil { - return nil, err - } - - _, err = c.Do("SELECT", "9") - if err != nil { - return nil, err - } - - n, err := redis.Int(c.Do("DBSIZE")) - if err != nil { - return nil, err - } - - if n != 0 { - return nil, errors.New("database #9 is not empty, test can not continue") - } - - return testConn{c}, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go deleted file mode 100644 index ac0e971c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "net" - "strconv" - "sync" - "time" -) - -// conn is the low-level implementation of Conn -type conn struct { - - // Shared - mu sync.Mutex - pending int - err error - conn net.Conn - - // Read - readTimeout time.Duration - br *bufio.Reader - - // Write - writeTimeout time.Duration - bw *bufio.Writer - - // Scratch space for formatting argument length. - // '*' or '$', length, "\r\n" - lenScratch [32]byte - - // Scratch space for formatting integers and floats. - numScratch [40]byte -} - -// Dial connects to the Redis server at the given network and address. -func Dial(network, address string) (Conn, error) { - dialer := xDialer{} - return dialer.Dial(network, address) -} - -// DialTimeout acts like Dial but takes timeouts for establishing the -// connection to the server, writing a command and reading a reply. -func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { - netDialer := net.Dialer{Timeout: connectTimeout} - dialer := xDialer{ - NetDial: netDialer.Dial, - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - } - return dialer.Dial(network, address) -} - -// A Dialer specifies options for connecting to a Redis server. -type xDialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, then net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // ReadTimeout specifies the timeout for reading a single command - // reply. If ReadTimeout is zero, then no timeout is used. - ReadTimeout time.Duration - - // WriteTimeout specifies the timeout for writing a single command. If - // WriteTimeout is zero, then no timeout is used. - WriteTimeout time.Duration -} - -// Dial connects to the Redis server at address on the named network. 
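-// For example (a sketch; assumes a reachable server):
-//
-//	d := xDialer{ReadTimeout: time.Second}
-//	c, err := d.Dial("tcp", ":6379")
-//	if err != nil {
-//		// handle error
-//	}
-//	defer c.Close()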
-func (d *xDialer) Dial(network, address string) (Conn, error) { - dial := d.NetDial - if dial == nil { - dial = net.Dial - } - netConn, err := dial(network, address) - if err != nil { - return nil, err - } - return &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: d.ReadTimeout, - writeTimeout: d.WriteTimeout, - }, nil -} - -// NewConn returns a new Redigo connection for the given net connection. -func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { - return &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: readTimeout, - writeTimeout: writeTimeout, - } -} - -func (c *conn) Close() error { - c.mu.Lock() - err := c.err - if c.err == nil { - c.err = errors.New("redigo: closed") - err = c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) fatal(err error) error { - c.mu.Lock() - if c.err == nil { - c.err = err - // Close connection to force errors on subsequent calls and to unblock - // other reader or writer. - c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) Err() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err -} - -func (c *conn) writeLen(prefix byte, n int) error { - c.lenScratch[len(c.lenScratch)-1] = '\n' - c.lenScratch[len(c.lenScratch)-2] = '\r' - i := len(c.lenScratch) - 3 - for { - c.lenScratch[i] = byte('0' + n%10) - i -= 1 - n = n / 10 - if n == 0 { - break - } - } - c.lenScratch[i] = prefix - _, err := c.bw.Write(c.lenScratch[i:]) - return err -} - -func (c *conn) writeString(s string) error { - c.writeLen('$', len(s)) - c.bw.WriteString(s) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeBytes(p []byte) error { - c.writeLen('$', len(p)) - c.bw.Write(p) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeInt64(n int64) error { - return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) -} - -func (c *conn) writeFloat64(n float64) error { - return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) -} - -func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { - c.writeLen('*', 1+len(args)) - err = c.writeString(cmd) - for _, arg := range args { - if err != nil { - break - } - switch arg := arg.(type) { - case string: - err = c.writeString(arg) - case []byte: - err = c.writeBytes(arg) - case int: - err = c.writeInt64(int64(arg)) - case int64: - err = c.writeInt64(arg) - case float64: - err = c.writeFloat64(arg) - case bool: - if arg { - err = c.writeString("1") - } else { - err = c.writeString("0") - } - case nil: - err = c.writeString("") - default: - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - err = c.writeBytes(buf.Bytes()) - } - } - return err -} - -type protocolError string - -func (pe protocolError) Error() string { - return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) -} - -func (c *conn) readLine() ([]byte, error) { - p, err := c.br.ReadSlice('\n') - if err == bufio.ErrBufferFull { - return nil, protocolError("long response line") - } - if err != nil { - return nil, err - } - i := len(p) - 2 - if i < 0 || p[i] != '\r' { - return nil, protocolError("bad response line terminator") - } - return p[:i], nil -} - -// parseLen parses bulk string and array lengths. 
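-// For example, the bulk string header "$6" yields parseLen([]byte("6")) == 6,
-// while the null replies "$-1" and "*-1" yield -1 with a nil error.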
-func parseLen(p []byte) (int, error) { - if len(p) == 0 { - return -1, protocolError("malformed length") - } - - if p[0] == '-' && len(p) == 2 && p[1] == '1' { - // handle $-1 and *-1 null replies. - return -1, nil - } - - var n int - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return -1, protocolError("illegal bytes in length") - } - n += int(b - '0') - } - - return n, nil -} - -// parseInt parses an integer reply. -func parseInt(p []byte) (interface{}, error) { - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - - var negate bool - if p[0] == '-' { - negate = true - p = p[1:] - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - } - - var n int64 - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return 0, protocolError("illegal bytes in integer") - } - n += int64(b - '0') - } - - if negate { - n = -n - } - return n, nil -} - -var ( - okReply interface{} = "OK" - pongReply interface{} = "PONG" -) - -func (c *conn) readReply() (interface{}, error) { - line, err := c.readLine() - if err != nil { - return nil, err - } - if len(line) == 0 { - return nil, protocolError("short response line") - } - switch line[0] { - case '+': - switch { - case len(line) == 3 && line[1] == 'O' && line[2] == 'K': - // Avoid allocation for frequent "+OK" response. - return okReply, nil - case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': - // Avoid allocation in PING command benchmarks :) - return pongReply, nil - default: - return string(line[1:]), nil - } - case '-': - return Error(string(line[1:])), nil - case ':': - return parseInt(line[1:]) - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - _, err = io.ReadFull(c.br, p) - if err != nil { - return nil, err - } - if line, err := c.readLine(); err != nil { - return nil, err - } else if len(line) != 0 { - return nil, protocolError("bad bulk string format") - } - return p, nil - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([]interface{}, n) - for i := range r { - r[i], err = c.readReply() - if err != nil { - return nil, err - } - } - return r, nil - } - return nil, protocolError("unexpected response line") -} - -func (c *conn) Send(cmd string, args ...interface{}) error { - c.mu.Lock() - c.pending += 1 - c.mu.Unlock() - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.writeCommand(cmd, args); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Flush() error { - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.bw.Flush(); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Receive() (reply interface{}, err error) { - if c.readTimeout != 0 { - c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) - } - if reply, err = c.readReply(); err != nil { - return nil, c.fatal(err) - } - // When using pub/sub, the number of receives can be greater than the - // number of sends. To enable normal use of the connection after - // unsubscribing from all channels, we do not decrement pending to a - // negative value. - // - // The pending field is decremented after the reply is read to handle the - // case where Receive is called before Send.
- c.mu.Lock() - if c.pending > 0 { - c.pending -= 1 - } - c.mu.Unlock() - if err, ok := reply.(Error); ok { - return nil, err - } - return -} - -func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { - c.mu.Lock() - pending := c.pending - c.pending = 0 - c.mu.Unlock() - - if cmd == "" && pending == 0 { - return nil, nil - } - - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - - if cmd != "" { - c.writeCommand(cmd, args) - } - - if err := c.bw.Flush(); err != nil { - return nil, c.fatal(err) - } - - if c.readTimeout != 0 { - c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) - } - - if cmd == "" { - reply := make([]interface{}, pending) - for i := range reply { - r, e := c.readReply() - if e != nil { - return nil, c.fatal(e) - } - reply[i] = r - } - return reply, nil - } - - var err error - var reply interface{} - for i := 0; i <= pending; i++ { - var e error - if reply, e = c.readReply(); e != nil { - return nil, c.fatal(e) - } - if e, ok := reply.(Error); ok && err == nil { - err = e - } - } - return reply, err -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go deleted file mode 100644 index 80037013..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
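-// The tests below exercise the RESP wire format produced by writeCommand and
-// parsed by readReply. As a worked example, Send("SET", "key", "value") is
-// framed as
-//
-//	*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n
-//
-// an array header (*3) followed by one length-prefixed bulk string per argument.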
- -package redis_test - -import ( - "bufio" - "bytes" - "math" - "net" - "reflect" - "strings" - "testing" - "time" - - "github.com/garyburd/redigo/internal/redistest" - "github.com/garyburd/redigo/redis" -) - -var writeTests = []struct { - args []interface{} - expected string -}{ - { - []interface{}{"SET", "key", "value"}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", - }, - { - []interface{}{"SET", "key", "value"}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", - }, - { - []interface{}{"SET", "key", byte(100)}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", - }, - { - []interface{}{"SET", "key", 100}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", - }, - { - []interface{}{"SET", "key", int64(math.MinInt64)}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$20\r\n-9223372036854775808\r\n", - }, - { - []interface{}{"SET", "key", float64(1349673917.939762)}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$21\r\n1.349673917939762e+09\r\n", - }, - { - []interface{}{"SET", "key", ""}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", - }, - { - []interface{}{"SET", "key", nil}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", - }, - { - []interface{}{"ECHO", true, false}, - "*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n", - }, -} - -func TestWrite(t *testing.T) { - for _, tt := range writeTests { - var buf bytes.Buffer - rw := bufio.ReadWriter{Writer: bufio.NewWriter(&buf)} - c := redis.NewConnBufio(rw) - err := c.Send(tt.args[0].(string), tt.args[1:]...) - if err != nil { - t.Errorf("Send(%v) returned error %v", tt.args, err) - continue - } - rw.Flush() - actual := buf.String() - if actual != tt.expected { - t.Errorf("Send(%v) = %q, want %q", tt.args, actual, tt.expected) - } - } -} - -var errorSentinel = &struct{}{} - -var readTests = []struct { - reply string - expected interface{} -}{ - { - "+OK\r\n", - "OK", - }, - { - "+PONG\r\n", - "PONG", - }, - { - "@OK\r\n", - errorSentinel, - }, - { - "$6\r\nfoobar\r\n", - []byte("foobar"), - }, - { - "$-1\r\n", - nil, - }, - { - ":1\r\n", - int64(1), - }, - { - ":-2\r\n", - int64(-2), - }, - { - "*0\r\n", - []interface{}{}, - }, - { - "*-1\r\n", - nil, - }, - { - "*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n", - []interface{}{[]byte("foo"), []byte("bar"), []byte("Hello"), []byte("World")}, - }, - { - "*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n", - []interface{}{[]byte("foo"), nil, []byte("bar")}, - }, - - { - // "x" is not a valid length - "$x\r\nfoobar\r\n", - errorSentinel, - }, - { - // -2 is not a valid length - "$-2\r\n", - errorSentinel, - }, - { - // "x" is not a valid integer - ":x\r\n", - errorSentinel, - }, - { - // missing \r\n following value - "$6\r\nfoobar", - errorSentinel, - }, - { - // short value - "$6\r\nxx", - errorSentinel, - }, - { - // long value - "$6\r\nfoobarx\r\n", - errorSentinel, - }, -} - -func TestRead(t *testing.T) { - for _, tt := range readTests { - rw := bufio.ReadWriter{ - Reader: bufio.NewReader(strings.NewReader(tt.reply)), - Writer: bufio.NewWriter(nil), // writer needs to support Flush - } - c := redis.NewConnBufio(rw) - actual, err := c.Receive() - if tt.expected == errorSentinel { - if err == nil { - t.Errorf("Receive(%q) did not return expected error", tt.reply) - } - } else { - if err != nil { - t.Errorf("Receive(%q) returned error %v", tt.reply, err) - continue - } - if !reflect.DeepEqual(actual, tt.expected) { - t.Errorf("Receive(%q) = %v, want %v", tt.reply, actual, tt.expected) - } - } - } -} - -var testCommands = []struct { - args []interface{} - expected interface{} -}{ - { -
[]interface{}{"PING"}, - "PONG", - }, - { - []interface{}{"SET", "foo", "bar"}, - "OK", - }, - { - []interface{}{"GET", "foo"}, - []byte("bar"), - }, - { - []interface{}{"GET", "nokey"}, - nil, - }, - { - []interface{}{"MGET", "nokey", "foo"}, - []interface{}{nil, []byte("bar")}, - }, - { - []interface{}{"INCR", "mycounter"}, - int64(1), - }, - { - []interface{}{"LPUSH", "mylist", "foo"}, - int64(1), - }, - { - []interface{}{"LPUSH", "mylist", "bar"}, - int64(2), - }, - { - []interface{}{"LRANGE", "mylist", 0, -1}, - []interface{}{[]byte("bar"), []byte("foo")}, - }, - { - []interface{}{"MULTI"}, - "OK", - }, - { - []interface{}{"LRANGE", "mylist", 0, -1}, - "QUEUED", - }, - { - []interface{}{"PING"}, - "QUEUED", - }, - { - []interface{}{"EXEC"}, - []interface{}{ - []interface{}{[]byte("bar"), []byte("foo")}, - "PONG", - }, - }, -} - -func TestDoCommands(t *testing.T) { - c, err := redistest.Dial() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - for _, cmd := range testCommands { - actual, err := c.Do(cmd.args[0].(string), cmd.args[1:]...) - if err != nil { - t.Errorf("Do(%v) returned error %v", cmd.args, err) - continue - } - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - } -} - -func TestPipelineCommands(t *testing.T) { - c, err := redistest.Dial() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - for _, cmd := range testCommands { - if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { - t.Fatalf("Send(%v) returned error %v", cmd.args, err) - } - } - if err := c.Flush(); err != nil { - t.Errorf("Flush() returned error %v", err) - } - for _, cmd := range testCommands { - actual, err := c.Receive() - if err != nil { - t.Fatalf("Receive(%v) returned error %v", cmd.args, err) - } - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - } -} - -func TestBlankCommmand(t *testing.T) { - c, err := redistest.Dial() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - for _, cmd := range testCommands { - if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { - t.Fatalf("Send(%v) returned error %v", cmd.args, err) - } - } - reply, err := redis.Values(c.Do("")) - if err != nil { - t.Fatalf("Do() returned error %v", err) - } - if len(reply) != len(testCommands) { - t.Fatalf("len(reply)=%d, want %d", len(reply), len(testCommands)) - } - for i, cmd := range testCommands { - actual := reply[i] - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - } -} - -func TestRecvBeforeSend(t *testing.T) { - c, err := redistest.Dial() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - done := make(chan struct{}) - go func() { - c.Receive() - close(done) - }() - time.Sleep(time.Millisecond) - c.Send("PING") - c.Flush() - <-done - _, err = c.Do("") - if err != nil { - t.Fatalf("error=%v", err) - } -} - -func TestError(t *testing.T) { - c, err := redistest.Dial() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - c.Do("SET", "key", "val") - _, err = c.Do("HSET", "key", "fld", "val") - if err == nil { - t.Errorf("Expected err for HSET on string key.") - } - if c.Err() != nil { - t.Errorf("Conn has Err()=%v, expect nil", c.Err()) - } - _, err = c.Do("SET", 
"key", "val") - if err != nil { - t.Errorf("Do(SET, key, val) returned error %v, expected nil.", err) - } -} - -func TestReadDeadline(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("net.Listen returned %v", err) - } - defer l.Close() - - go func() { - for { - c, err := l.Accept() - if err != nil { - return - } - go func() { - time.Sleep(time.Second) - c.Write([]byte("+OK\r\n")) - c.Close() - }() - } - }() - - c1, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0) - if err != nil { - t.Fatalf("redis.Dial returned %v", err) - } - defer c1.Close() - - _, err = c1.Do("PING") - if err == nil { - t.Fatalf("c1.Do() returned nil, expect error") - } - if c1.Err() == nil { - t.Fatalf("c1.Err() = nil, expect error") - } - - c2, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0) - if err != nil { - t.Fatalf("redis.Dial returned %v", err) - } - defer c2.Close() - - c2.Send("PING") - c2.Flush() - _, err = c2.Receive() - if err == nil { - t.Fatalf("c2.Receive() returned nil, expect error") - } - if c2.Err() == nil { - t.Fatalf("c2.Err() = nil, expect error") - } -} - -// Connect to local instance of Redis running on the default port. -func ExampleDial(x int) { - c, err := redis.Dial("tcp", ":6379") - if err != nil { - // handle error - } - defer c.Close() -} - -// TextExecError tests handling of errors in a transaction. See -// http://redis.io/topics/transactions for information on how Redis handles -// errors in a transaction. -func TestExecError(t *testing.T) { - c, err := redistest.Dial() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - // Execute commands that fail before EXEC is called. - - c.Do("ZADD", "k0", 0, 0) - c.Send("MULTI") - c.Send("NOTACOMMAND", "k0", 0, 0) - c.Send("ZINCRBY", "k0", 0, 0) - v, err := c.Do("EXEC") - if err == nil { - t.Fatalf("EXEC returned values %v, expected error", v) - } - - // Execute commands that fail after EXEC is called. The first command - // returns an error. - - c.Do("ZADD", "k1", 0, 0) - c.Send("MULTI") - c.Send("HSET", "k1", 0, 0) - c.Send("ZINCRBY", "k1", 0, 0) - v, err = c.Do("EXEC") - if err != nil { - t.Fatalf("EXEC returned error %v", err) - } - - vs, err := redis.Values(v, nil) - if err != nil { - t.Fatalf("Values(v) returned error %v", err) - } - - if len(vs) != 2 { - t.Fatalf("len(vs) == %d, want 2", len(vs)) - } - - if _, ok := vs[0].(error); !ok { - t.Fatalf("first result is type %T, expected error", vs[0]) - } - - if _, ok := vs[1].([]byte); !ok { - t.Fatalf("second result is type %T, expected []byte", vs[2]) - } - - // Execute commands that fail after EXEC is called. The second command - // returns an error. 
- - c.Do("ZADD", "k2", 0, 0) - c.Send("MULTI") - c.Send("ZINCRBY", "k2", 0, 0) - c.Send("HSET", "k2", 0, 0) - v, err = c.Do("EXEC") - if err != nil { - t.Fatalf("EXEC returned error %v", err) - } - - vs, err = redis.Values(v, nil) - if err != nil { - t.Fatalf("Values(v) returned error %v", err) - } - - if len(vs) != 2 { - t.Fatalf("len(vs) == %d, want 2", len(vs)) - } - - if _, ok := vs[0].([]byte); !ok { - t.Fatalf("first result is type %T, expected []byte", vs[0]) - } - - if _, ok := vs[1].(error); !ok { - t.Fatalf("second result is type %T, expected error", vs[2]) - } -} - -func BenchmarkDoEmpty(b *testing.B) { - b.StopTimer() - c, err := redistest.Dial() - if err != nil { - b.Fatal(err) - } - defer c.Close() - b.StartTimer() - for i := 0; i < b.N; i++ { - if _, err := c.Do(""); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDoPing(b *testing.B) { - b.StopTimer() - c, err := redistest.Dial() - if err != nil { - b.Fatal(err) - } - defer c.Close() - b.StartTimer() - for i := 0; i < b.N; i++ { - if _, err := c.Do("PING"); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go deleted file mode 100644 index 1ae6f0cc..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package redis is a client for the Redis database. -// -// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more -// documentation about this package. -// -// Connections -// -// The Conn interface is the primary interface for working with Redis. -// Applications create connections by calling the Dial, DialWithTimeout or -// NewConn functions. In the future, functions will be added for creating -// sharded and other types of connections. -// -// The application must call the connection Close method when the application -// is done with the connection. -// -// Executing Commands -// -// The Conn interface has a generic method for executing Redis commands: -// -// Do(commandName string, args ...interface{}) (reply interface{}, err error) -// -// The Redis command reference (http://redis.io/commands) lists the available -// commands. 
An example of using the Redis APPEND command is: -// -// n, err := conn.Do("APPEND", "key", "value") -// -// The Do method converts command arguments to binary strings for transmission -// to the server as follows: -// -// Go Type Conversion -// []byte Sent as is -// string Sent as is -// int, int64 strconv.FormatInt(v) -// float64 strconv.FormatFloat(v, 'g', -1, 64) -// bool true -> "1", false -> "0" -// nil "" -// all other types fmt.Print(v) -// -// Redis command reply types are represented using the following Go types: -// -// Redis type Go type -// error redis.Error -// integer int64 -// simple string string -// bulk string []byte or nil if value not present. -// array []interface{} or nil if value not present. -// -// Use type assertions or the reply helper functions to convert from -// interface{} to the specific Go type for the command result. -// -// Pipelining -// -// Connections support pipelining using the Send, Flush and Receive methods. -// -// Send(commandName string, args ...interface{}) error -// Flush() error -// Receive() (reply interface{}, err error) -// -// Send writes the command to the connection's output buffer. Flush flushes the -// connection's output buffer to the server. Receive reads a single reply from -// the server. The following example shows a simple pipeline. -// -// c.Send("SET", "foo", "bar") -// c.Send("GET", "foo") -// c.Flush() -// c.Receive() // reply from SET -// v, err = c.Receive() // reply from GET -// -// The Do method combines the functionality of the Send, Flush and Receive -// methods. The Do method starts by writing the command and flushing the output -// buffer. Next, the Do method receives all pending replies including the reply -// for the command just sent by Do. If any of the received replies is an error, -// then Do returns the error. If there are no errors, then Do returns the last -// reply. If the command argument to the Do method is "", then the Do method -// will flush the output buffer and receive pending replies without sending a -// command. -// -// Use the Send and Do methods to implement pipelined transactions. -// -// c.Send("MULTI") -// c.Send("INCR", "foo") -// c.Send("INCR", "bar") -// r, err := c.Do("EXEC") -// fmt.Println(r) // prints [1, 1] -// -// Concurrency -// -// Connections do not support concurrent calls to the write methods (Send, -// Flush) or concurrent calls to the read method (Receive). Connections do -// allow a concurrent reader and writer. -// -// Because the Do method combines the functionality of Send, Flush and Receive, -// the Do method cannot be called concurrently with the other methods. -// -// For full concurrent access to Redis, use the thread-safe Pool to get and -// release connections from within a goroutine. -// -// Publish and Subscribe -// -// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. -// -// c.Send("SUBSCRIBE", "example") -// c.Flush() -// for { -// reply, err := c.Receive() -// if err != nil { -// return err -// } -// // process pushed message -// } -// -// The PubSubConn type wraps a Conn with convenience methods for implementing -// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods -// send and flush a subscription management command. The receive method -// converts a pushed message to convenient types for use in a type switch. 
-// - // psc := redis.PubSubConn{c} - // psc.Subscribe("example") - // for { - // switch v := psc.Receive().(type) { - // case redis.Message: - // fmt.Printf("%s: message: %s\n", v.Channel, v.Data) - // case redis.Subscription: - // fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) - // case error: - // return v - // } - // } - // - // Reply Helpers - // - // The Bool, Int, Bytes, String, Strings and Values functions convert a reply - // to a value of a specific type. To allow convenient wrapping of calls to the - // connection Do and Receive methods, the functions take a second argument of - // type error. If the error is non-nil, then the helper function returns the - // error. If the error is nil, the function converts the reply to the specified - // type: - // - // exists, err := redis.Bool(c.Do("EXISTS", "foo")) - // if err != nil { - // // handle error return from c.Do or type conversion error. - // } - // - // The Scan function converts elements of an array reply to Go types: - // - // var value1 int - // var value2 string - // reply, err := redis.Values(c.Do("MGET", "key1", "key2")) - // if err != nil { - // // handle error - // } - // if _, err := redis.Scan(reply, &value1, &value2); err != nil { - // // handle error - // } -package redis diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go deleted file mode 100644 index 129b86d6..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "fmt" - "log" -) - -// NewLoggingConn returns a logging wrapper around a connection. -func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { - if prefix != "" { - prefix = prefix + "." 
- } - return &loggingConn{conn, logger, prefix} -} - -type loggingConn struct { - Conn - logger *log.Logger - prefix string -} - -func (c *loggingConn) Close() error { - err := c.Conn.Close() - var buf bytes.Buffer - fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) - c.logger.Output(2, buf.String()) - return err -} - -func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { - const chop = 32 - switch v := v.(type) { - case []byte: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case string: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case []interface{}: - if len(v) == 0 { - buf.WriteString("[]") - } else { - sep := "[" - fin := "]" - if len(v) > chop { - v = v[:chop] - fin = "...]" - } - for _, vv := range v { - buf.WriteString(sep) - c.printValue(buf, vv) - sep = ", " - } - buf.WriteString(fin) - } - default: - fmt.Fprint(buf, v) - } -} - -func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s%s(", c.prefix, method) - if method != "Receive" { - buf.WriteString(commandName) - for _, arg := range args { - buf.WriteString(", ") - c.printValue(&buf, arg) - } - } - buf.WriteString(") -> (") - if method != "Send" { - c.printValue(&buf, reply) - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%v)", err) - c.logger.Output(3, buf.String()) -} - -func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { - reply, err := c.Conn.Do(commandName, args...) - c.print("Do", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) Send(commandName string, args ...interface{}) error { - err := c.Conn.Send(commandName, args...) - c.print("Send", commandName, args, nil, err) - return err -} - -func (c *loggingConn) Receive() (interface{}, error) { - reply, err := c.Conn.Receive() - c.print("Receive", "", nil, reply, err) - return reply, err -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go deleted file mode 100644 index 9daf2e33..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "container/list" - "crypto/rand" - "crypto/sha1" - "errors" - "io" - "strconv" - "sync" - "time" - - "github.com/garyburd/redigo/internal" -) - -var nowFunc = time.Now // for testing - -// ErrPoolExhausted is returned from a pool connection method (Do, Send, -// Receive, Flush, Err) when the maximum number of database connections in the -// pool has been reached. 
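-// A caller can test for it directly (a sketch; "pool" is an assumed
-// *Pool variable):
-//
-//	c := pool.Get()
-//	defer c.Close()
-//	if _, err := c.Do("PING"); err == ErrPoolExhausted {
-//		// the pool is at MaxActive with Wait disabled; back off or shed load
-//	}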
-var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") - -var ( - errPoolClosed = errors.New("redigo: connection pool closed") - errConnClosed = errors.New("redigo: connection closed") -) - -// Pool maintains a pool of connections. The application calls the Get method -// to get a connection from the pool and the connection's Close method to -// return the connection's resources to the pool. -// -// The following example shows how to use a pool in a web application. The -// application creates a pool at application startup and makes it available to -// request handlers using a global variable. -// -// func newPool(server, password string) *redis.Pool { -// return &redis.Pool{ -// MaxIdle: 3, -// IdleTimeout: 240 * time.Second, -// Dial: func () (redis.Conn, error) { -// c, err := redis.Dial("tcp", server) -// if err != nil { -// return nil, err -// } -// if _, err := c.Do("AUTH", password); err != nil { -// c.Close() -// return nil, err -// } -// return c, err -// }, -// TestOnBorrow: func(c redis.Conn, t time.Time) error { -// _, err := c.Do("PING") -// return err -// }, -// } -// } -// -// var ( -// pool *redis.Pool -// redisServer = flag.String("redisServer", ":6379", "") -// redisPassword = flag.String("redisPassword", "", "") -// ) -// -// func main() { -// flag.Parse() -// pool = newPool(*redisServer, *redisPassword) -// ... -// } -// -// A request handler gets a connection from the pool and closes the connection -// when the handler is done: -// -// func serveHome(w http.ResponseWriter, r *http.Request) { -// conn := pool.Get() -// defer conn.Close() -// .... -// } -// -type Pool struct { - - // Dial is an application supplied function for creating and configuring a - // connection - Dial func() (Conn, error) - - // TestOnBorrow is an optional application supplied function for checking - // the health of an idle connection before the connection is used again by - // the application. Argument t is the time that the connection was returned - // to the pool. If the function returns an error, then the connection is - // closed. - TestOnBorrow func(c Conn, t time.Time) error - - // Maximum number of idle connections in the pool. - MaxIdle int - - // Maximum number of connections allocated by the pool at a given time. - // When zero, there is no limit on the number of connections in the pool. - MaxActive int - - // Close connections after remaining idle for this duration. If the value - // is zero, then idle connections are not closed. Applications should set - // the timeout to a value less than the server's timeout. - IdleTimeout time.Duration - - // If Wait is true and the pool is at the MaxIdle limit, then Get() waits - // for a connection to be returned to the pool before returning. - Wait bool - - // mu protects fields defined below. - mu sync.Mutex - cond *sync.Cond - closed bool - active int - - // Stack of idleConn with most recently used at the front. - idle list.List -} - -type idleConn struct { - c Conn - t time.Time -} - -// NewPool creates a new pool. This function is deprecated. Applications should -// initialize the Pool fields directly as shown in example. -func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { - return &Pool{Dial: newFn, MaxIdle: maxIdle} -} - -// Get gets a connection. The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. 
If there is an error -// getting an underlying connection, then the connection Err, Do, Send, Flush -// and Receive methods return that error. -func (p *Pool) Get() Conn { - c, err := p.get() - if err != nil { - return errorConnection{err} - } - return &pooledConnection{p: p, c: c} -} - -// ActiveCount returns the number of active connections in the pool. -func (p *Pool) ActiveCount() int { - p.mu.Lock() - active := p.active - p.mu.Unlock() - return active -} - -// Close releases the resources used by the pool. -func (p *Pool) Close() error { - p.mu.Lock() - idle := p.idle - p.idle.Init() - p.closed = true - p.active -= idle.Len() - if p.cond != nil { - p.cond.Broadcast() - } - p.mu.Unlock() - for e := idle.Front(); e != nil; e = e.Next() { - e.Value.(idleConn).c.Close() - } - return nil -} - -// release decrements the active count and signals waiters. The caller must -// hold p.mu during the call. -func (p *Pool) release() { - p.active -= 1 - if p.cond != nil { - p.cond.Signal() - } -} - -// get prunes stale connections and returns a connection from the idle list or -// creates a new connection. -func (p *Pool) get() (Conn, error) { - p.mu.Lock() - - // Prune stale connections. - - if timeout := p.IdleTimeout; timeout > 0 { - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Back() - if e == nil { - break - } - ic := e.Value.(idleConn) - if ic.t.Add(timeout).After(nowFunc()) { - break - } - p.idle.Remove(e) - p.release() - p.mu.Unlock() - ic.c.Close() - p.mu.Lock() - } - } - - for { - - // Get idle connection. - - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Front() - if e == nil { - break - } - ic := e.Value.(idleConn) - p.idle.Remove(e) - test := p.TestOnBorrow - p.mu.Unlock() - if test == nil || test(ic.c, ic.t) == nil { - return ic.c, nil - } - ic.c.Close() - p.mu.Lock() - p.release() - } - - // Check for pool closed before dialing a new connection. - - if p.closed { - p.mu.Unlock() - return nil, errors.New("redigo: get on closed pool") - } - - // Dial new connection if under limit. - - if p.MaxActive == 0 || p.active < p.MaxActive { - dial := p.Dial - p.active += 1 - p.mu.Unlock() - c, err := dial() - if err != nil { - p.mu.Lock() - p.release() - p.mu.Unlock() - c = nil - } - return c, err - } - - if !p.Wait { - p.mu.Unlock() - return nil, ErrPoolExhausted - } - - if p.cond == nil { - p.cond = sync.NewCond(&p.mu) - } - p.cond.Wait() - } -} - -func (p *Pool) put(c Conn, forceClose bool) error { - err := c.Err() - p.mu.Lock() - if !p.closed && err == nil && !forceClose { - p.idle.PushFront(idleConn{t: nowFunc(), c: c}) - if p.idle.Len() > p.MaxIdle { - c = p.idle.Remove(p.idle.Back()).(idleConn).c - } else { - c = nil - } - } - - if c == nil { - if p.cond != nil { - p.cond.Signal() - } - p.mu.Unlock() - return nil - } - - p.release() - p.mu.Unlock() - return c.Close() -} - -type pooledConnection struct { - p *Pool - c Conn - state int -} - -var ( - sentinel []byte - sentinelOnce sync.Once -) - -func initSentinel() { - p := make([]byte, 64) - if _, err := rand.Read(p); err == nil { - sentinel = p - } else { - h := sha1.New() - io.WriteString(h, "Oops, rand failed. 
Use time instead.") - io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) - sentinel = h.Sum(nil) - } -} - -func (pc *pooledConnection) Close() error { - c := pc.c - if _, ok := c.(errorConnection); ok { - return nil - } - pc.c = errorConnection{errConnClosed} - - if pc.state&internal.MultiState != 0 { - c.Send("DISCARD") - pc.state &^= (internal.MultiState | internal.WatchState) - } else if pc.state&internal.WatchState != 0 { - c.Send("UNWATCH") - pc.state &^= internal.WatchState - } - if pc.state&internal.SubscribeState != 0 { - c.Send("UNSUBSCRIBE") - c.Send("PUNSUBSCRIBE") - // To detect the end of the message stream, ask the server to echo - // a sentinel value and read until we see that value. - sentinelOnce.Do(initSentinel) - c.Send("ECHO", sentinel) - c.Flush() - for { - p, err := c.Receive() - if err != nil { - break - } - if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { - pc.state &^= internal.SubscribeState - break - } - } - } - c.Do("") - pc.p.put(c, pc.state != 0) - return nil -} - -func (pc *pooledConnection) Err() error { - return pc.c.Err() -} - -func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - ci := internal.LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - return pc.c.Do(commandName, args...) -} - -func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { - ci := internal.LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - return pc.c.Send(commandName, args...) -} - -func (pc *pooledConnection) Flush() error { - return pc.c.Flush() -} - -func (pc *pooledConnection) Receive() (reply interface{}, err error) { - return pc.c.Receive() -} - -type errorConnection struct{ err error } - -func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } -func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } -func (ec errorConnection) Err() error { return ec.err } -func (ec errorConnection) Close() error { return ec.err } -func (ec errorConnection) Flush() error { return ec.err } -func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go deleted file mode 100644 index 1fe305f1..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go +++ /dev/null @@ -1,674 +0,0 @@ -// Copyright 2011 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis_test - -import ( - "errors" - "io" - "reflect" - "sync" - "testing" - "time" - - "github.com/garyburd/redigo/internal/redistest" - "github.com/garyburd/redigo/redis" -) - -type poolTestConn struct { - d *poolDialer - err error - redis.Conn -} - -func (c *poolTestConn) Close() error { c.d.open -= 1; return nil } -func (c *poolTestConn) Err() error { return c.err } - -func (c *poolTestConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - if commandName == "ERR" { - c.err = args[0].(error) - commandName = "PING" - } - if commandName != "" { - c.d.commands = append(c.d.commands, commandName) - } - return c.Conn.Do(commandName, args...) -} - -func (c *poolTestConn) Send(commandName string, args ...interface{}) error { - c.d.commands = append(c.d.commands, commandName) - return c.Conn.Send(commandName, args...) -} - -type poolDialer struct { - t *testing.T - dialed int - open int - commands []string - dialErr error -} - -func (d *poolDialer) dial() (redis.Conn, error) { - d.dialed += 1 - if d.dialErr != nil { - return nil, d.dialErr - } - c, err := redistest.Dial() - if err != nil { - return nil, err - } - d.open += 1 - return &poolTestConn{d: d, Conn: c}, nil -} - -func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) { - if d.dialed != dialed { - d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed) - } - if d.open != open { - d.t.Errorf("%s: open=%d, want %d", message, d.open, open) - } - if active := p.ActiveCount(); active != open { - d.t.Errorf("%s: active=%d, want %d", message, active, open) - } -} - -func TestPoolReuse(t *testing.T) { - d := poolDialer{t: t} - p := &redis.Pool{ - MaxIdle: 2, - Dial: d.dial, - } - - for i := 0; i < 10; i++ { - c1 := p.Get() - c1.Do("PING") - c2 := p.Get() - c2.Do("PING") - c1.Close() - c2.Close() - } - - d.check("before close", p, 2, 2) - p.Close() - d.check("after close", p, 2, 0) -} - -func TestPoolMaxIdle(t *testing.T) { - d := poolDialer{t: t} - p := &redis.Pool{ - MaxIdle: 2, - Dial: d.dial, - } - for i := 0; i < 10; i++ { - c1 := p.Get() - c1.Do("PING") - c2 := p.Get() - c2.Do("PING") - c3 := p.Get() - c3.Do("PING") - c1.Close() - c2.Close() - c3.Close() - } - d.check("before close", p, 12, 2) - p.Close() - d.check("after close", p, 12, 0) -} - -func TestPoolError(t *testing.T) { - d := poolDialer{t: t} - p := &redis.Pool{ - MaxIdle: 2, - Dial: d.dial, - } - - c := p.Get() - c.Do("ERR", io.EOF) - if c.Err() == nil { - t.Errorf("expected c.Err() != nil") - } - c.Close() - - c = p.Get() - c.Do("ERR", io.EOF) - c.Close() - - d.check(".", p, 2, 0) -} - -func TestPoolClose(t *testing.T) { - d := poolDialer{t: t} - p := &redis.Pool{ - MaxIdle: 2, - Dial: d.dial, - } - - c1 := p.Get() - c1.Do("PING") - c2 := p.Get() - c2.Do("PING") - c3 := p.Get() - c3.Do("PING") - - c1.Close() - if _, err := c1.Do("PING"); err == nil { - t.Errorf("expected error after connection closed") - } - - c2.Close() - c2.Close() - - p.Close() - - d.check("after pool close", p, 3, 1) - - if _, err := c1.Do("PING"); err == nil { - t.Errorf("expected error after connection and pool closed") - } - - c3.Close() - - d.check("after conn close", p, 3, 0) - - c1 = p.Get() - if _, err := c1.Do("PING"); err == nil { - t.Errorf("expected error after pool closed") - } -} - -func TestPoolTimeout(t *testing.T) { - d := poolDialer{t: t} - p := &redis.Pool{ - MaxIdle: 2, - IdleTimeout: 300 * time.Second, - Dial: d.dial, - } - - now := time.Now() - redis.SetNowFunc(func() time.Time { return now }) - defer 
redis.SetNowFunc(time.Now)
-
-	c := p.Get()
-	c.Do("PING")
-	c.Close()
-
-	d.check("1", p, 1, 1)
-
-	now = now.Add(p.IdleTimeout)
-
-	c = p.Get()
-	c.Do("PING")
-	c.Close()
-
-	d.check("2", p, 2, 1)
-
-	p.Close()
-}
-
-func TestPoolConcurrentSendReceive(t *testing.T) {
-	p := &redis.Pool{
-		Dial: redistest.Dial,
-	}
-	c := p.Get()
-	done := make(chan error, 1)
-	go func() {
-		_, err := c.Receive()
-		done <- err
-	}()
-	c.Send("PING")
-	c.Flush()
-	err := <-done
-	if err != nil {
-		t.Fatalf("Receive() returned error %v", err)
-	}
-	_, err = c.Do("")
-	if err != nil {
-		t.Fatalf("Do() returned error %v", err)
-	}
-	c.Close()
-	p.Close()
-}
-
-func TestPoolBorrowCheck(t *testing.T) {
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 2,
-		Dial: d.dial,
-		TestOnBorrow: func(redis.Conn, time.Time) error { return redis.Error("BLAH") },
-	}
-
-	for i := 0; i < 10; i++ {
-		c := p.Get()
-		c.Do("PING")
-		c.Close()
-	}
-	d.check("1", p, 10, 1)
-	p.Close()
-}
-
-func TestPoolMaxActive(t *testing.T) {
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 2,
-		MaxActive: 2,
-		Dial: d.dial,
-	}
-	c1 := p.Get()
-	c1.Do("PING")
-	c2 := p.Get()
-	c2.Do("PING")
-
-	d.check("1", p, 2, 2)
-
-	c3 := p.Get()
-	if _, err := c3.Do("PING"); err != redis.ErrPoolExhausted {
-		t.Errorf("expected pool exhausted")
-	}
-
-	c3.Close()
-	d.check("2", p, 2, 2)
-	c2.Close()
-	d.check("3", p, 2, 2)
-
-	c3 = p.Get()
-	if _, err := c3.Do("PING"); err != nil {
-		t.Errorf("expected good connection, err=%v", err)
-	}
-	c3.Close()
-
-	d.check("4", p, 2, 2)
-	p.Close()
-}
-
-func TestPoolMonitorCleanup(t *testing.T) {
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 2,
-		MaxActive: 2,
-		Dial: d.dial,
-	}
-	c := p.Get()
-	c.Send("MONITOR")
-	c.Close()
-
-	d.check("", p, 1, 0)
-	p.Close()
-}
-
-func TestPoolPubSubCleanup(t *testing.T) {
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 2,
-		MaxActive: 2,
-		Dial: d.dial,
-	}
-
-	c := p.Get()
-	c.Send("SUBSCRIBE", "x")
-	c.Close()
-
-	want := []string{"SUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
-	if !reflect.DeepEqual(d.commands, want) {
-		t.Errorf("got commands %v, want %v", d.commands, want)
-	}
-	d.commands = nil
-
-	c = p.Get()
-	c.Send("PSUBSCRIBE", "x*")
-	c.Close()
-
-	want = []string{"PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
-	if !reflect.DeepEqual(d.commands, want) {
-		t.Errorf("got commands %v, want %v", d.commands, want)
-	}
-	d.commands = nil
-
-	p.Close()
-}
-
-func TestPoolTransactionCleanup(t *testing.T) {
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 2,
-		MaxActive: 2,
-		Dial: d.dial,
-	}
-
-	c := p.Get()
-	c.Do("WATCH", "key")
-	c.Do("PING")
-	c.Close()
-
-	want := []string{"WATCH", "PING", "UNWATCH"}
-	if !reflect.DeepEqual(d.commands, want) {
-		t.Errorf("got commands %v, want %v", d.commands, want)
-	}
-	d.commands = nil
-
-	c = p.Get()
-	c.Do("WATCH", "key")
-	c.Do("UNWATCH")
-	c.Do("PING")
-	c.Close()
-
-	want = []string{"WATCH", "UNWATCH", "PING"}
-	if !reflect.DeepEqual(d.commands, want) {
-		t.Errorf("got commands %v, want %v", d.commands, want)
-	}
-	d.commands = nil
-
-	c = p.Get()
-	c.Do("WATCH", "key")
-	c.Do("MULTI")
-	c.Do("PING")
-	c.Close()
-
-	want = []string{"WATCH", "MULTI", "PING", "DISCARD"}
-	if !reflect.DeepEqual(d.commands, want) {
-		t.Errorf("got commands %v, want %v", d.commands, want)
-	}
-	d.commands = nil
-
-	c = p.Get()
-	c.Do("WATCH", "key")
-	c.Do("MULTI")
-	c.Do("DISCARD")
-	c.Do("PING")
-	c.Close()
-
-	want = []string{"WATCH", "MULTI", "DISCARD", "PING"}
-	if !reflect.DeepEqual(d.commands, want) {
-		t.Errorf("got commands %v, want %v", d.commands, want)
-	}
-	d.commands = nil
-
-	c = p.Get()
-	c.Do("WATCH", "key")
-	c.Do("MULTI")
-	c.Do("EXEC")
-	c.Do("PING")
-	c.Close()
-
-	want = []string{"WATCH", "MULTI", "EXEC", "PING"}
-	if !reflect.DeepEqual(d.commands, want) {
-		t.Errorf("got commands %v, want %v", d.commands, want)
-	}
-	d.commands = nil
-
-	p.Close()
-}
-
-func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error {
-	errs := make(chan error, 10)
-	for i := 0; i < cap(errs); i++ {
-		go func() {
-			c := p.Get()
-			_, err := c.Do(cmd, args...)
-			errs <- err
-			c.Close()
-		}()
-	}
-
-	// Wait for goroutines to block.
-	time.Sleep(time.Second / 4)
-
-	return errs
-}
-
-func TestWaitPool(t *testing.T) {
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 1,
-		MaxActive: 1,
-		Dial: d.dial,
-		Wait: true,
-	}
-	defer p.Close()
-	c := p.Get()
-	errs := startGoroutines(p, "PING")
-	d.check("before close", p, 1, 1)
-	c.Close()
-	timeout := time.After(2 * time.Second)
-	for i := 0; i < cap(errs); i++ {
-		select {
-		case err := <-errs:
-			if err != nil {
-				t.Fatal(err)
-			}
-		case <-timeout:
-			t.Fatalf("timeout waiting for blocked goroutine %d", i)
-		}
-	}
-	d.check("done", p, 1, 1)
-}
-
-func TestWaitPoolClose(t *testing.T) {
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 1,
-		MaxActive: 1,
-		Dial: d.dial,
-		Wait: true,
-	}
-	c := p.Get()
-	if _, err := c.Do("PING"); err != nil {
-		t.Fatal(err)
-	}
-	errs := startGoroutines(p, "PING")
-	d.check("before close", p, 1, 1)
-	p.Close()
-	timeout := time.After(2 * time.Second)
-	for i := 0; i < cap(errs); i++ {
-		select {
-		case err := <-errs:
-			switch err {
-			case nil:
-				t.Fatal("blocked goroutine did not get error")
-			case redis.ErrPoolExhausted:
-				t.Fatal("blocked goroutine got pool exhausted error")
-			}
-		case <-timeout:
-			t.Fatal("timeout waiting for blocked goroutine")
-		}
-	}
-	c.Close()
-	d.check("done", p, 1, 0)
-}
-
-func TestWaitPoolCommandError(t *testing.T) {
-	testErr := errors.New("test")
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 1,
-		MaxActive: 1,
-		Dial: d.dial,
-		Wait: true,
-	}
-	defer p.Close()
-	c := p.Get()
-	errs := startGoroutines(p, "ERR", testErr)
-	d.check("before close", p, 1, 1)
-	c.Close()
-	timeout := time.After(2 * time.Second)
-	for i := 0; i < cap(errs); i++ {
-		select {
-		case err := <-errs:
-			if err != nil {
-				t.Fatal(err)
-			}
-		case <-timeout:
-			t.Fatalf("timeout waiting for blocked goroutine %d", i)
-		}
-	}
-	d.check("done", p, cap(errs), 0)
-}
-
-func TestWaitPoolDialError(t *testing.T) {
-	testErr := errors.New("test")
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: 1,
-		MaxActive: 1,
-		Dial: d.dial,
-		Wait: true,
-	}
-	defer p.Close()
-	c := p.Get()
-	errs := startGoroutines(p, "ERR", testErr)
-	d.check("before close", p, 1, 1)
-
-	d.dialErr = errors.New("dial")
-	c.Close()
-
-	nilCount := 0
-	errCount := 0
-	timeout := time.After(2 * time.Second)
-	for i := 0; i < cap(errs); i++ {
-		select {
-		case err := <-errs:
-			switch err {
-			case nil:
-				nilCount++
-			case d.dialErr:
-				errCount++
-			default:
-				t.Fatalf("expected dial error or nil, got %v", err)
-			}
-		case <-timeout:
-			t.Fatalf("timeout waiting for blocked goroutine %d", i)
-		}
-	}
-	if nilCount != 1 {
-		t.Errorf("expected one nil error, got %d", nilCount)
-	}
-	if errCount != cap(errs)-1 {
-		t.Errorf("expected %d dial errors, got %d", cap(errs)-1, errCount)
-	}
-	d.check("done", p, cap(errs), 0)
-}
-
-// Borrowing requires us to iterate over the idle connections, unlock the pool,
-// and perform a blocking operation to check the connection still works. If
-// TestOnBorrow fails, we must reacquire the lock and continue iteration. This
-// test ensures that iteration will work correctly if multiple threads are
-// iterating simultaneously.
-func TestLocking_TestOnBorrowFails_PoolDoesntCrash(t *testing.T) {
-	count := 100
-
-	// First, we'll create a pool where the pilfering of idle connections fails.
-	d := poolDialer{t: t}
-	p := &redis.Pool{
-		MaxIdle: count,
-		MaxActive: count,
-		Dial: d.dial,
-		TestOnBorrow: func(c redis.Conn, t time.Time) error {
-			return errors.New("No way back into the real world.")
-		},
-	}
-	defer p.Close()
-
-	// Fill the pool with idle connections.
-	b1 := sync.WaitGroup{}
-	b1.Add(count)
-	b2 := sync.WaitGroup{}
-	b2.Add(count)
-	for i := 0; i < count; i++ {
-		go func() {
-			c := p.Get()
-			if c.Err() != nil {
-				t.Errorf("pool get failed: %v", c.Err())
-			}
-			b1.Done()
-			b1.Wait()
-			c.Close()
-			b2.Done()
-		}()
-	}
-	b2.Wait()
-	if d.dialed != count {
-		t.Errorf("Expected %d dials, got %d", count, d.dialed)
-	}
-
-	// Spawn a bunch of goroutines to thrash the pool.
-	b2.Add(count)
-	for i := 0; i < count; i++ {
-		go func() {
-			c := p.Get()
-			if c.Err() != nil {
-				t.Errorf("pool get failed: %v", c.Err())
-			}
-			c.Close()
-			b2.Done()
-		}()
-	}
-	b2.Wait()
-	if d.dialed != count*2 {
-		t.Errorf("Expected %d dials, got %d", count*2, d.dialed)
-	}
-}
-
-func BenchmarkPoolGet(b *testing.B) {
-	b.StopTimer()
-	p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2}
-	c := p.Get()
-	if err := c.Err(); err != nil {
-		b.Fatal(err)
-	}
-	c.Close()
-	defer p.Close()
-	b.StartTimer()
-	for i := 0; i < b.N; i++ {
-		c = p.Get()
-		c.Close()
-	}
-}
-
-func BenchmarkPoolGetErr(b *testing.B) {
-	b.StopTimer()
-	p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2}
-	c := p.Get()
-	if err := c.Err(); err != nil {
-		b.Fatal(err)
-	}
-	c.Close()
-	defer p.Close()
-	b.StartTimer()
-	for i := 0; i < b.N; i++ {
-		c = p.Get()
-		if err := c.Err(); err != nil {
-			b.Fatal(err)
-		}
-		c.Close()
-	}
-}
-
-func BenchmarkPoolGetPing(b *testing.B) {
-	b.StopTimer()
-	p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2}
-	c := p.Get()
-	if err := c.Err(); err != nil {
-		b.Fatal(err)
-	}
-	c.Close()
-	defer p.Close()
-	b.StartTimer()
-	for i := 0; i < b.N; i++ {
-		c = p.Get()
-		if _, err := c.Do("PING"); err != nil {
-			b.Fatal(err)
-		}
-		c.Close()
-	}
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go
deleted file mode 100644
index f0790429..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
-	"errors"
-)
-
-// Subscription represents a subscribe or unsubscribe notification.
-type Subscription struct {
-
-	// Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
-	Kind string
-
-	// The channel that was changed.
-	Channel string
-
-	// The current number of subscriptions for the connection.
-	Count int
-}
-
-// Message represents a message notification.
-type Message struct {
-
-	// The originating channel.
-	Channel string
-
-	// The message data.
-	Data []byte
-}
-
-// PMessage represents a pmessage notification.
-type PMessage struct {
-
-	// The matched pattern.
-	Pattern string
-
-	// The originating channel.
-	Channel string
-
-	// The message data.
-	Data []byte
-}
-
-// PubSubConn wraps a Conn with convenience methods for subscribers.
-type PubSubConn struct {
-	Conn Conn
-}
-
-// Close closes the connection.
-func (c PubSubConn) Close() error {
-	return c.Conn.Close()
-}
-
-// Subscribe subscribes the connection to the specified channels.
-func (c PubSubConn) Subscribe(channel ...interface{}) error {
-	c.Conn.Send("SUBSCRIBE", channel...)
-	return c.Conn.Flush()
-}
-
-// PSubscribe subscribes the connection to the given patterns.
-func (c PubSubConn) PSubscribe(channel ...interface{}) error {
-	c.Conn.Send("PSUBSCRIBE", channel...)
-	return c.Conn.Flush()
-}
-
-// Unsubscribe unsubscribes the connection from the given channels, or from all
-// of them if none is given.
-func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
-	c.Conn.Send("UNSUBSCRIBE", channel...)
-	return c.Conn.Flush()
-}
-
-// PUnsubscribe unsubscribes the connection from the given patterns, or from all
-// of them if none is given.
-func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
-	c.Conn.Send("PUNSUBSCRIBE", channel...)
-	return c.Conn.Flush()
-}
-
-// Receive returns a pushed message as a Subscription, Message, PMessage or
-// error. The return value is intended to be used directly in a type switch as
-// illustrated in the PubSubConn example.
-func (c PubSubConn) Receive() interface{} {
-	reply, err := Values(c.Conn.Receive())
-	if err != nil {
-		return err
-	}
-
-	var kind string
-	reply, err = Scan(reply, &kind)
-	if err != nil {
-		return err
-	}
-
-	switch kind {
-	case "message":
-		var m Message
-		if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
-			return err
-		}
-		return m
-	case "pmessage":
-		var pm PMessage
-		if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
-			return err
-		}
-		return pm
-	case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
-		s := Subscription{Kind: kind}
-		if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
-			return err
-		}
-		return s
-	}
-	return errors.New("redigo: unknown pubsub notification")
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go
deleted file mode 100644
index 707f5a47..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License.
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis_test - -import ( - "fmt" - "net" - "reflect" - "sync" - "testing" - "time" - - "github.com/garyburd/redigo/internal/redistest" - "github.com/garyburd/redigo/redis" -) - -func publish(channel, value interface{}) { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - c.Do("PUBLISH", channel, value) -} - -// Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine. -func ExamplePubSubConn() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - var wg sync.WaitGroup - wg.Add(2) - - psc := redis.PubSubConn{Conn: c} - - // This goroutine receives and prints pushed notifications from the server. - // The goroutine exits when the connection is unsubscribed from all - // channels or there is an error. - go func() { - defer wg.Done() - for { - switch n := psc.Receive().(type) { - case redis.Message: - fmt.Printf("Message: %s %s\n", n.Channel, n.Data) - case redis.PMessage: - fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data) - case redis.Subscription: - fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count) - if n.Count == 0 { - return - } - case error: - fmt.Printf("error: %v\n", n) - return - } - } - }() - - // This goroutine manages subscriptions for the connection. - go func() { - defer wg.Done() - - psc.Subscribe("example") - psc.PSubscribe("p*") - - // The following function calls publish a message using another - // connection to the Redis server. - publish("example", "hello") - publish("example", "world") - publish("pexample", "foo") - publish("pexample", "bar") - - // Unsubscribe from all connections. This will cause the receiving - // goroutine to exit. 
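		// A clarifying note: with no arguments, Unsubscribe and PUnsubscribe
		// apply to all channels and all patterns respectively (see their doc
		// comments above), so the pushed subscription count reaches zero and
		// the receiving goroutine returns.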
-		psc.Unsubscribe()
-		psc.PUnsubscribe()
-	}()
-
-	wg.Wait()
-
-	// Output:
-	// Subscription: subscribe example 1
-	// Subscription: psubscribe p* 2
-	// Message: example hello
-	// Message: example world
-	// PMessage: p* pexample foo
-	// PMessage: p* pexample bar
-	// Subscription: unsubscribe example 1
-	// Subscription: punsubscribe p* 0
-}
-
-func expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) {
-	actual := c.Receive()
-	if !reflect.DeepEqual(actual, expected) {
-		t.Errorf("%s = %v, want %v", message, actual, expected)
-	}
-}
-
-func TestPushed(t *testing.T) {
-	pc, err := redistest.Dial()
-	if err != nil {
-		t.Fatalf("error connecting to database, %v", err)
-	}
-	defer pc.Close()
-
-	nc, err := net.Dial("tcp", ":6379")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer nc.Close()
-	nc.SetReadDeadline(time.Now().Add(4 * time.Second))
-
-	c := redis.PubSubConn{Conn: redis.NewConn(nc, 0, 0)}
-
-	c.Subscribe("c1")
-	expectPushed(t, c, "Subscribe(c1)", redis.Subscription{Kind: "subscribe", Channel: "c1", Count: 1})
-	c.Subscribe("c2")
-	expectPushed(t, c, "Subscribe(c2)", redis.Subscription{Kind: "subscribe", Channel: "c2", Count: 2})
-	c.PSubscribe("p1")
-	expectPushed(t, c, "PSubscribe(p1)", redis.Subscription{Kind: "psubscribe", Channel: "p1", Count: 3})
-	c.PSubscribe("p2")
-	expectPushed(t, c, "PSubscribe(p2)", redis.Subscription{Kind: "psubscribe", Channel: "p2", Count: 4})
-	c.PUnsubscribe()
-	expectPushed(t, c, "PUnsubscribe(p1)", redis.Subscription{Kind: "punsubscribe", Channel: "p1", Count: 3})
-	expectPushed(t, c, "PUnsubscribe()", redis.Subscription{Kind: "punsubscribe", Channel: "p2", Count: 2})
-
-	pc.Do("PUBLISH", "c1", "hello")
-	expectPushed(t, c, "PUBLISH c1 hello", redis.Message{Channel: "c1", Data: []byte("hello")})
}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go
deleted file mode 100644
index c90a48ed..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-// Error represents an error returned in a command reply.
-type Error string
-
-func (err Error) Error() string { return string(err) }
-
-// Conn represents a connection to a Redis server.
-type Conn interface {
-	// Close closes the connection.
-	Close() error
-
-	// Err returns a non-nil value if the connection is broken. The returned
-	// value is either the first non-nil value returned from the underlying
-	// network connection or a protocol parsing error. Applications should
-	// close broken connections.
-	Err() error
-
-	// Do sends a command to the server and returns the received reply.
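	// A minimal usage sketch, assuming an established connection c and a
	// reachable server:
	//
	//	reply, err := c.Do("SET", "example-key", "example-value")
	//	if err != nil {
	//		// the connection is broken; close it and dial again
	//	}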
-	Do(commandName string, args ...interface{}) (reply interface{}, err error)
-
-	// Send writes the command to the client's output buffer.
-	Send(commandName string, args ...interface{}) error
-
-	// Flush flushes the output buffer to the Redis server.
-	Flush() error
-
-	// Receive receives a single reply from the Redis server.
-	Receive() (reply interface{}, err error)
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go
deleted file mode 100644
index 5648f930..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-)
-
-// ErrNil indicates that a reply value is nil.
-var ErrNil = errors.New("redigo: nil returned")
-
-// Int is a helper that converts a command reply to an integer. If err is not
-// equal to nil, then Int returns 0, err. Otherwise, Int converts the
-// reply to an int as follows:
-//
-//	Reply type    Result
-//	integer       int(reply), nil
-//	bulk string   parsed reply, nil
-//	nil           0, ErrNil
-//	other         0, error
-func Int(reply interface{}, err error) (int, error) {
-	if err != nil {
-		return 0, err
-	}
-	switch reply := reply.(type) {
-	case int64:
-		x := int(reply)
-		if int64(x) != reply {
-			return 0, strconv.ErrRange
-		}
-		return x, nil
-	case []byte:
-		n, err := strconv.ParseInt(string(reply), 10, 0)
-		return int(n), err
-	case nil:
-		return 0, ErrNil
-	case Error:
-		return 0, reply
-	}
-	return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
-}
-
-// Int64 is a helper that converts a command reply to a 64 bit integer. If err
-// is not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts
-// the reply to an int64 as follows:
-//
-//	Reply type    Result
-//	integer       reply, nil
-//	bulk string   parsed reply, nil
-//	nil           0, ErrNil
-//	other         0, error
-func Int64(reply interface{}, err error) (int64, error) {
-	if err != nil {
-		return 0, err
-	}
-	switch reply := reply.(type) {
-	case int64:
-		return reply, nil
-	case []byte:
-		n, err := strconv.ParseInt(string(reply), 10, 64)
-		return n, err
-	case nil:
-		return 0, ErrNil
-	case Error:
-		return 0, reply
-	}
-	return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
-}
-
-var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
-
-// Uint64 is a helper that converts a command reply to a 64 bit unsigned
-// integer. If err is not equal to nil, then Uint64 returns 0, err. Otherwise,
-// Uint64 converts the reply to a uint64 as follows:
-//
-//	Reply type    Result
-//	integer       reply, nil
-//	bulk string   parsed reply, nil
-//	nil           0, ErrNil
-//	other         0, error
-func Uint64(reply interface{}, err error) (uint64, error) {
-	if err != nil {
-		return 0, err
-	}
-	switch reply := reply.(type) {
-	case int64:
-		if reply < 0 {
-			return 0, errNegativeInt
-		}
-		return uint64(reply), nil
-	case []byte:
-		n, err := strconv.ParseUint(string(reply), 10, 64)
-		return n, err
-	case nil:
-		return 0, ErrNil
-	case Error:
-		return 0, reply
-	}
-	return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
-}
-
-// Float64 is a helper that converts a command reply to a 64 bit float. If err
-// is not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
-// the reply to a float64 as follows:
-//
-//	Reply type    Result
-//	bulk string   parsed reply, nil
-//	nil           0, ErrNil
-//	other         0, error
-func Float64(reply interface{}, err error) (float64, error) {
-	if err != nil {
-		return 0, err
-	}
-	switch reply := reply.(type) {
-	case []byte:
-		n, err := strconv.ParseFloat(string(reply), 64)
-		return n, err
-	case nil:
-		return 0, ErrNil
-	case Error:
-		return 0, reply
-	}
-	return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
-}
-
-// String is a helper that converts a command reply to a string. If err is not
-// equal to nil, then String returns "", err. Otherwise String converts the
-// reply to a string as follows:
-//
-//	Reply type      Result
-//	bulk string     string(reply), nil
-//	simple string   reply, nil
-//	nil             "", ErrNil
-//	other           "", error
-func String(reply interface{}, err error) (string, error) {
-	if err != nil {
-		return "", err
-	}
-	switch reply := reply.(type) {
-	case []byte:
-		return string(reply), nil
-	case string:
-		return reply, nil
-	case nil:
-		return "", ErrNil
-	case Error:
-		return "", reply
-	}
-	return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
-}
-
-// Bytes is a helper that converts a command reply to a slice of bytes. If err
-// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
-// the reply to a slice of bytes as follows:
-//
-//	Reply type      Result
-//	bulk string     reply, nil
-//	simple string   []byte(reply), nil
-//	nil             nil, ErrNil
-//	other           nil, error
-func Bytes(reply interface{}, err error) ([]byte, error) {
-	if err != nil {
-		return nil, err
-	}
-	switch reply := reply.(type) {
-	case []byte:
-		return reply, nil
-	case string:
-		return []byte(reply), nil
-	case nil:
-		return nil, ErrNil
-	case Error:
-		return nil, reply
-	}
-	return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
-}
-
-// Bool is a helper that converts a command reply to a boolean. If err is not
-// equal to nil, then Bool returns false, err. Otherwise Bool converts the
-// reply to boolean as follows:
-//
-//	Reply type    Result
-//	integer       value != 0, nil
-//	bulk string   strconv.ParseBool(reply)
-//	nil           false, ErrNil
-//	other         false, error
-func Bool(reply interface{}, err error) (bool, error) {
-	if err != nil {
-		return false, err
-	}
-	switch reply := reply.(type) {
-	case int64:
-		return reply != 0, nil
-	case []byte:
-		return strconv.ParseBool(string(reply))
-	case nil:
-		return false, ErrNil
-	case Error:
-		return false, reply
-	}
-	return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
-}
-
-// MultiBulk is deprecated. Use Values.
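// A migration sketch (illustrative): because MultiBulk simply forwards to
// Values, existing call sites can be renamed mechanically, e.g.
//
//	items, err := redis.MultiBulk(c.Do("LRANGE", "mylist", 0, -1))
//	items, err := redis.Values(c.Do("LRANGE", "mylist", 0, -1))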
-func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
-
-// Values is a helper that converts an array command reply to a []interface{}.
-// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
-// converts the reply as follows:
-//
-//	Reply type   Result
-//	array        reply, nil
-//	nil          nil, ErrNil
-//	other        nil, error
-func Values(reply interface{}, err error) ([]interface{}, error) {
-	if err != nil {
-		return nil, err
-	}
-	switch reply := reply.(type) {
-	case []interface{}:
-		return reply, nil
-	case nil:
-		return nil, ErrNil
-	case Error:
-		return nil, reply
-	}
-	return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
-}
-
-// Strings is a helper that converts an array command reply to a []string. If
-// err is not equal to nil, then Strings returns nil, err. Nil array items are
-// converted to "" in the output slice. Strings returns an error if an array
-// item is not a bulk string or nil.
-func Strings(reply interface{}, err error) ([]string, error) {
-	if err != nil {
-		return nil, err
-	}
-	switch reply := reply.(type) {
-	case []interface{}:
-		result := make([]string, len(reply))
-		for i := range reply {
-			if reply[i] == nil {
-				continue
-			}
-			p, ok := reply[i].([]byte)
-			if !ok {
-				return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
-			}
-			result[i] = string(p)
-		}
-		return result, nil
-	case nil:
-		return nil, ErrNil
-	case Error:
-		return nil, reply
-	}
-	return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
-}
-
-// Ints is a helper that converts an array command reply to a []int. If
-// err is not equal to nil, then Ints returns nil, err.
-func Ints(reply interface{}, err error) ([]int, error) {
-	var ints []int
-	if reply == nil {
-		return ints, ErrNil
-	}
-	values, err := Values(reply, err)
-	if err != nil {
-		return ints, err
-	}
-	if err := ScanSlice(values, &ints); err != nil {
-		return ints, err
-	}
-	return ints, nil
-}
-
-// StringMap is a helper that converts an array of strings (alternating key, value)
-// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
-// Requires an even number of values in the result.
-func StringMap(result interface{}, err error) (map[string]string, error) {
-	values, err := Values(result, err)
-	if err != nil {
-		return nil, err
-	}
-	if len(values)%2 != 0 {
-		return nil, errors.New("redigo: StringMap expects an even number of values in the result")
-	}
-	m := make(map[string]string, len(values)/2)
-	for i := 0; i < len(values); i += 2 {
-		key, okKey := values[i].([]byte)
-		value, okValue := values[i+1].([]byte)
-		if !okKey || !okValue {
-			return nil, errors.New("redigo: StringMap key not a bulk string value")
-		}
-		m[string(key)] = string(value)
-	}
-	return m, nil
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go
deleted file mode 100644
index 92744c59..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
-	"errors"
-	"io"
-	"reflect"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/garyburd/redigo/internal/redistest"
-	"github.com/garyburd/redigo/redis"
-)
-
-type valueError struct {
-	v   interface{}
-	err error
-}
-
-func ve(v interface{}, err error) valueError {
-	return valueError{v, err}
-}
-
-var replyTests = []struct {
-	name     interface{}
-	actual   valueError
-	expected valueError
-}{
-	{
-		"ints([v1, v2])",
-		ve(redis.Ints([]interface{}{[]byte("4"), []byte("5")}, nil)),
-		ve([]int{4, 5}, nil),
-	},
-	{
-		"ints(nil)",
-		ve(redis.Ints(nil, nil)),
-		ve([]int(nil), redis.ErrNil),
-	},
-	{
-		"strings([v1, v2])",
-		ve(redis.Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
-		ve([]string{"v1", "v2"}, nil),
-	},
-	{
-		"strings(nil)",
-		ve(redis.Strings(nil, nil)),
-		ve([]string(nil), redis.ErrNil),
-	},
-	{
-		"values([v1, v2])",
-		ve(redis.Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
-		ve([]interface{}{[]byte("v1"), []byte("v2")}, nil),
-	},
-	{
-		"values(nil)",
-		ve(redis.Values(nil, nil)),
-		ve([]interface{}(nil), redis.ErrNil),
-	},
-	{
-		"float64(1.0)",
-		ve(redis.Float64([]byte("1.0"), nil)),
-		ve(float64(1.0), nil),
-	},
-	{
-		"float64(nil)",
-		ve(redis.Float64(nil, nil)),
-		ve(float64(0.0), redis.ErrNil),
-	},
-	{
-		"uint64(1)",
-		ve(redis.Uint64(int64(1), nil)),
-		ve(uint64(1), nil),
-	},
-	{
-		"uint64(-1)",
-		ve(redis.Uint64(int64(-1), nil)),
-		ve(uint64(0), redis.ErrNegativeInt),
-	},
-}
-
-func TestReply(t *testing.T) {
-	for _, rt := range replyTests {
-		if rt.actual.err != rt.expected.err {
-			t.Errorf("%s returned err %v, want %v", rt.name, rt.actual.err, rt.expected.err)
-			continue
-		}
-		if !reflect.DeepEqual(rt.actual.v, rt.expected.v) {
-			t.Errorf("%s=%+v, want %+v", rt.name, rt.actual.v, rt.expected.v)
-		}
-	}
-}
-
-// dial wraps redistest.Dial with a more suitable function name for examples.
-func dial() (redis.Conn, error) { - return redistest.Dial() -} - -func ExampleBool() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - - c.Do("SET", "foo", 1) - exists, _ := redis.Bool(c.Do("EXISTS", "foo")) - fmt.Printf("%#v\n", exists) - // Output: - // true -} - -func ExampleInt() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - - c.Do("SET", "k1", 1) - n, _ := redis.Int(c.Do("GET", "k1")) - fmt.Printf("%#v\n", n) - n, _ = redis.Int(c.Do("INCR", "k1")) - fmt.Printf("%#v\n", n) - // Output: - // 1 - // 2 -} - -func ExampleInts() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - - c.Do("SADD", "set_with_integers", 4, 5, 6) - ints, _ := redis.Ints(c.Do("SMEMBERS", "set_with_integers")) - fmt.Printf("%#v\n", ints) - // Output: - // []int{4, 5, 6} -} - -func ExampleString() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - - c.Do("SET", "hello", "world") - s, err := redis.String(c.Do("GET", "hello")) - fmt.Printf("%#v\n", s) - // Output: - // "world" -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go deleted file mode 100644 index 8c9cfa18..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "sync" -) - -func ensureLen(d reflect.Value, n int) { - if n > d.Cap() { - d.Set(reflect.MakeSlice(d.Type(), n, n)) - } else { - d.SetLen(n) - } -} - -func cannotConvert(d reflect.Value, s interface{}) error { - return fmt.Errorf("redigo: Scan cannot convert from %s to %s", - reflect.TypeOf(s), d.Type()) -} - -func convertAssignBytes(d reflect.Value, s []byte) (err error) { - switch d.Type().Kind() { - case reflect.Float32, reflect.Float64: - var x float64 - x, err = strconv.ParseFloat(string(s), d.Type().Bits()) - d.SetFloat(x) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var x int64 - x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) - d.SetInt(x) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - var x uint64 - x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) - d.SetUint(x) - case reflect.Bool: - var x bool - x, err = strconv.ParseBool(string(s)) - d.SetBool(x) - case reflect.String: - d.SetString(string(s)) - case reflect.Slice: - if d.Type().Elem().Kind() != reflect.Uint8 { - err = cannotConvert(d, s) - } else { - d.SetBytes(s) - } - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignInt(d reflect.Value, s int64) (err error) { - switch d.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.SetInt(s) - if d.Int() != s { - err = strconv.ErrRange - d.SetInt(0) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if s < 0 { - err = strconv.ErrRange - } else { - x := uint64(s) - d.SetUint(x) - if d.Uint() != x { - err = strconv.ErrRange - d.SetUint(0) - } - } - case reflect.Bool: - d.SetBool(s != 0) - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignValue(d reflect.Value, s interface{}) (err error) { - switch s := s.(type) { - case []byte: - err = convertAssignBytes(d, s) - case int64: - err = convertAssignInt(d, s) - default: - err = cannotConvert(d, s) - } - return err -} - -func convertAssignValues(d reflect.Value, s []interface{}) error { - if d.Type().Kind() != reflect.Slice { - return cannotConvert(d, s) - } - ensureLen(d, len(s)) - for i := 0; i < len(s); i++ { - if err := convertAssignValue(d.Index(i), s[i]); err != nil { - return err - } - } - return nil -} - -func convertAssign(d interface{}, s interface{}) (err error) { - // Handle the most common destination types using type switches and - // fall back to reflection for all other types. 
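	// A note on the design choice: the type switch below is a fast path. The
	// common destination types (*string, *int, *bool, *[]byte, *interface{})
	// are handled without reflection, which is only paid for in the default
	// branches and for the less common types.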
-	switch s := s.(type) {
-	case nil:
-		// ignore
-	case []byte:
-		switch d := d.(type) {
-		case *string:
-			*d = string(s)
-		case *int:
-			*d, err = strconv.Atoi(string(s))
-		case *bool:
-			*d, err = strconv.ParseBool(string(s))
-		case *[]byte:
-			*d = s
-		case *interface{}:
-			*d = s
-		case nil:
-			// skip value
-		default:
-			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
-				err = cannotConvert(d, s)
-			} else {
-				err = convertAssignBytes(d.Elem(), s)
-			}
-		}
-	case int64:
-		switch d := d.(type) {
-		case *int:
-			x := int(s)
-			if int64(x) != s {
-				err = strconv.ErrRange
-				x = 0
-			}
-			*d = x
-		case *bool:
-			*d = s != 0
-		case *interface{}:
-			*d = s
-		case nil:
-			// skip value
-		default:
-			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
-				err = cannotConvert(d, s)
-			} else {
-				err = convertAssignInt(d.Elem(), s)
-			}
-		}
-	case []interface{}:
-		switch d := d.(type) {
-		case *[]interface{}:
-			*d = s
-		case *interface{}:
-			*d = s
-		case nil:
-			// skip value
-		default:
-			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
-				err = cannotConvert(d, s)
-			} else {
-				err = convertAssignValues(d.Elem(), s)
-			}
-		}
-	case Error:
-		err = s
-	default:
-		err = cannotConvert(reflect.ValueOf(d), s)
-	}
-	return
-}
-
-// Scan copies from src to the values pointed at by dest.
-//
-// The values pointed at by dest must be an integer, float, boolean, string,
-// []byte, interface{} or slices of these types. Scan uses the standard strconv
-// package to convert bulk strings to numeric and boolean types.
-//
-// If a dest value is nil, then the corresponding src value is skipped.
-//
-// If a src element is nil, then the corresponding dest value is not modified.
-//
-// To enable easy use of Scan in a loop, Scan returns the slice of src
-// following the copied values.
-func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
-	if len(src) < len(dest) {
-		return nil, errors.New("redigo: Scan array short")
-	}
-	var err error
-	for i, d := range dest {
-		err = convertAssign(d, src[i])
-		if err != nil {
-			break
-		}
-	}
-	return src[len(dest):], err
-}
-
-type fieldSpec struct {
-	name  string
-	index []int
-	//omitEmpty bool
-}
-
-type structSpec struct {
-	m map[string]*fieldSpec
-	l []*fieldSpec
-}
-
-func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
-	return ss.m[string(name)]
-}
-
-func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
-	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-		switch {
-		case f.PkgPath != "":
-			// Ignore unexported fields.
-		case f.Anonymous:
-			// TODO: Handle pointers. Requires change to decoder and
-			// protection against infinite recursion.
-			if f.Type.Kind() == reflect.Struct {
-				compileStructSpec(f.Type, depth, append(index, i), ss)
-			}
-		default:
-			fs := &fieldSpec{name: f.Name}
-			tag := f.Tag.Get("redis")
-			p := strings.Split(tag, ",")
-			if len(p) > 0 {
-				if p[0] == "-" {
-					continue
-				}
-				if len(p[0]) > 0 {
-					fs.name = p[0]
-				}
-				for _, s := range p[1:] {
-					switch s {
-					//case "omitempty":
-					//	fs.omitempty = true
-					default:
-						panic(errors.New("redigo: unknown field flag " + s + " for type " + t.Name()))
-					}
-				}
-			}
-			d, found := depth[fs.name]
-			if !found {
-				d = 1 << 30
-			}
-			switch {
-			case len(index) == d:
-				// At same depth, remove from result.
-				delete(ss.m, fs.name)
-				j := 0
-				for i := 0; i < len(ss.l); i++ {
-					if fs.name != ss.l[i].name {
-						ss.l[j] = ss.l[i]
-						j += 1
-					}
-				}
-				ss.l = ss.l[:j]
-			case len(index) < d:
-				fs.index = make([]int, len(index)+1)
-				copy(fs.index, index)
-				fs.index[len(index)] = i
-				depth[fs.name] = len(index)
-				ss.m[fs.name] = fs
-				ss.l = append(ss.l, fs)
-			}
-		}
-	}
-}
-
-var (
-	structSpecMutex  sync.RWMutex
-	structSpecCache  = make(map[reflect.Type]*structSpec)
-	defaultFieldSpec = &fieldSpec{}
-)
-
-func structSpecForType(t reflect.Type) *structSpec {
-
-	structSpecMutex.RLock()
-	ss, found := structSpecCache[t]
-	structSpecMutex.RUnlock()
-	if found {
-		return ss
-	}
-
-	structSpecMutex.Lock()
-	defer structSpecMutex.Unlock()
-	ss, found = structSpecCache[t]
-	if found {
-		return ss
-	}
-
-	ss = &structSpec{m: make(map[string]*fieldSpec)}
-	compileStructSpec(t, make(map[string]int), nil, ss)
-	structSpecCache[t] = ss
-	return ss
-}
-
-var errScanStructValue = errors.New("redigo: ScanStruct value must be non-nil pointer to a struct")
-
-// ScanStruct scans alternating names and values from src to a struct. The
-// HGETALL and CONFIG GET commands return replies in this format.
-//
-// ScanStruct uses exported field names to match values in the response. Use
-// 'redis' field tag to override the name:
-//
-//	Field int `redis:"myName"`
-//
-// Fields with the tag redis:"-" are ignored.
-//
-// Integer, float, boolean, string and []byte fields are supported. Scan uses the
-// standard strconv package to convert bulk string values to numeric and
-// boolean types.
-//
-// If a src element is nil, then the corresponding field is not modified.
-func ScanStruct(src []interface{}, dest interface{}) error {
-	d := reflect.ValueOf(dest)
-	if d.Kind() != reflect.Ptr || d.IsNil() {
-		return errScanStructValue
-	}
-	d = d.Elem()
-	if d.Kind() != reflect.Struct {
-		return errScanStructValue
-	}
-	ss := structSpecForType(d.Type())
-
-	if len(src)%2 != 0 {
-		return errors.New("redigo: ScanStruct expects an even number of values in src")
-	}
-
-	for i := 0; i < len(src); i += 2 {
-		s := src[i+1]
-		if s == nil {
-			continue
-		}
-		name, ok := src[i].([]byte)
-		if !ok {
-			return errors.New("redigo: ScanStruct key not a bulk string value")
-		}
-		fs := ss.fieldSpec(name)
-		if fs == nil {
-			continue
-		}
-		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-var (
-	errScanSliceValue = errors.New("redigo: ScanSlice dest must be non-nil pointer to a slice")
-)
-
-// ScanSlice scans src to the slice pointed to by dest. The elements of the dest
-// slice must be integer, float, boolean, string, struct or pointer to struct
-// values.
-//
-// Struct fields must be integer, float, boolean or string values. All struct
-// fields are used unless a subset is specified using fieldNames.
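// A minimal sketch, assuming src holds alternating title and rating bulk
// strings (for example, from a SORT command with two GET patterns):
//
//	var albums []struct {
//		Title  string
//		Rating int
//	}
//	err := ScanSlice(src, &albums)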
-func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanSliceValue - } - d = d.Elem() - if d.Kind() != reflect.Slice { - return errScanSliceValue - } - - isPtr := false - t := d.Type().Elem() - if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { - isPtr = true - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - ensureLen(d, len(src)) - for i, s := range src { - if s == nil { - continue - } - if err := convertAssignValue(d.Index(i), s); err != nil { - return err - } - } - return nil - } - - ss := structSpecForType(t) - fss := ss.l - if len(fieldNames) > 0 { - fss = make([]*fieldSpec, len(fieldNames)) - for i, name := range fieldNames { - fss[i] = ss.m[name] - if fss[i] == nil { - return errors.New("redigo: ScanSlice bad field name " + name) - } - } - } - - if len(fss) == 0 { - return errors.New("redigo: ScanSlice no struct fields") - } - - n := len(src) / len(fss) - if n*len(fss) != len(src) { - return errors.New("redigo: ScanSlice length not a multiple of struct field count") - } - - ensureLen(d, n) - for i := 0; i < n; i++ { - d := d.Index(i) - if isPtr { - if d.IsNil() { - d.Set(reflect.New(t)) - } - d = d.Elem() - } - for j, fs := range fss { - s := src[i*len(fss)+j] - if s == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return err - } - } - } - return nil -} - -// Args is a helper for constructing command arguments from structured values. -type Args []interface{} - -// Add returns the result of appending value to args. -func (args Args) Add(value ...interface{}) Args { - return append(args, value...) -} - -// AddFlat returns the result of appending the flattened value of v to args. -// -// Maps are flattened by appending the alternating keys and map values to args. -// -// Slices are flattened by appending the slice elements to args. -// -// Structs are flattened by appending the alternating names and values of -// exported fields to args. If v is a nil struct pointer, then nothing is -// appended. The 'redis' field tag overrides struct field names. See ScanStruct -// for more information on the use of the 'redis' field tag. -// -// Other types are appended to args as is. 
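// A minimal sketch of the flattening, assuming an open connection c and a
// hypothetical hash key "id1" (map iteration order is not specified, so the
// field order may vary):
//
//	m := map[string]string{"title": "Example", "author": "Gary"}
//	c.Do("HMSET", Args{}.Add("id1").AddFlat(m)...)
//	// resulting command: HMSET id1 title Example author Gary (in some order)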
-func (args Args) AddFlat(v interface{}) Args { - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Struct: - args = flattenStruct(args, rv) - case reflect.Slice: - for i := 0; i < rv.Len(); i++ { - args = append(args, rv.Index(i).Interface()) - } - case reflect.Map: - for _, k := range rv.MapKeys() { - args = append(args, k.Interface(), rv.MapIndex(k).Interface()) - } - case reflect.Ptr: - if rv.Type().Elem().Kind() == reflect.Struct { - if !rv.IsNil() { - args = flattenStruct(args, rv.Elem()) - } - } else { - args = append(args, v) - } - default: - args = append(args, v) - } - return args -} - -func flattenStruct(args Args, v reflect.Value) Args { - ss := structSpecForType(v.Type()) - for _, fs := range ss.l { - fv := v.FieldByIndex(fs.index) - args = append(args, fs.name, fv.Interface()) - } - return args -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go deleted file mode 100644 index b57dd896..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis_test - -import ( - "fmt" - "github.com/garyburd/redigo/redis" - "math" - "reflect" - "testing" -) - -var scanConversionTests = []struct { - src interface{} - dest interface{} -}{ - {[]byte("-inf"), math.Inf(-1)}, - {[]byte("+inf"), math.Inf(1)}, - {[]byte("0"), float64(0)}, - {[]byte("3.14159"), float64(3.14159)}, - {[]byte("3.14"), float32(3.14)}, - {[]byte("-100"), int(-100)}, - {[]byte("101"), int(101)}, - {int64(102), int(102)}, - {[]byte("103"), uint(103)}, - {int64(104), uint(104)}, - {[]byte("105"), int8(105)}, - {int64(106), int8(106)}, - {[]byte("107"), uint8(107)}, - {int64(108), uint8(108)}, - {[]byte("0"), false}, - {int64(0), false}, - {[]byte("f"), false}, - {[]byte("1"), true}, - {int64(1), true}, - {[]byte("t"), true}, - {[]byte("hello"), "hello"}, - {[]byte("world"), []byte("world")}, - {[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}}, - {[]interface{}{[]byte("foo")}, []string{"foo"}}, - {[]interface{}{[]byte("hello"), []byte("world")}, []string{"hello", "world"}}, - {[]interface{}{[]byte("bar")}, [][]byte{[]byte("bar")}}, - {[]interface{}{[]byte("1")}, []int{1}}, - {[]interface{}{[]byte("1"), []byte("2")}, []int{1, 2}}, - {[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}}, - {[]interface{}{[]byte("1")}, []byte{1}}, - {[]interface{}{[]byte("1")}, []bool{true}}, -} - -func TestScanConversion(t *testing.T) { - for _, tt := range scanConversionTests { - values := []interface{}{tt.src} - dest := reflect.New(reflect.TypeOf(tt.dest)) - values, err := redis.Scan(values, dest.Interface()) - if err != nil { - t.Errorf("Scan(%v) returned error %v", tt, err) - continue - } - if !reflect.DeepEqual(tt.dest, dest.Elem().Interface()) { - t.Errorf("Scan(%v) returned %v, want %v", tt, dest.Elem().Interface(), tt.dest) - } - } -} - -var scanConversionErrorTests = []struct { - src interface{} - dest interface{} -}{ - {[]byte("1234"), byte(0)}, - {int64(1234), byte(0)}, - {[]byte("-1"), byte(0)}, - {int64(-1), byte(0)}, - {[]byte("junk"), false}, - {redis.Error("blah"), false}, -} - -func TestScanConversionError(t *testing.T) { - for _, tt := range scanConversionErrorTests { - values := []interface{}{tt.src} - dest := reflect.New(reflect.TypeOf(tt.dest)) - values, err := redis.Scan(values, dest.Interface()) - if err == nil { - t.Errorf("Scan(%v) did not return error", tt) - } - } -} - -func ExampleScan() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - - c.Send("HMSET", "album:1", "title", "Red", "rating", 5) - c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) - c.Send("HMSET", "album:3", "title", "Beat") - c.Send("LPUSH", "albums", "1") - c.Send("LPUSH", "albums", "2") - c.Send("LPUSH", "albums", "3") - values, err := redis.Values(c.Do("SORT", "albums", - "BY", "album:*->rating", - "GET", "album:*->title", - "GET", "album:*->rating")) - if err != nil { - panic(err) - } - - for len(values) > 0 { - var title string - rating := -1 // initialize to illegal value to detect nil. 
- values, err = redis.Scan(values, &title, &rating) - if err != nil { - panic(err) - } - if rating == -1 { - fmt.Println(title, "not-rated") - } else { - fmt.Println(title, rating) - } - } - // Output: - // Beat not-rated - // Earthbound 1 - // Red 5 -} - -type s0 struct { - X int - Y int `redis:"y"` - Bt bool -} - -type s1 struct { - X int `redis:"-"` - I int `redis:"i"` - U uint `redis:"u"` - S string `redis:"s"` - P []byte `redis:"p"` - B bool `redis:"b"` - Bt bool - Bf bool - s0 -} - -var scanStructTests = []struct { - title string - reply []string - value interface{} -}{ - {"basic", - []string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"}, - &s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}}, - }, -} - -func TestScanStruct(t *testing.T) { - for _, tt := range scanStructTests { - - var reply []interface{} - for _, v := range tt.reply { - reply = append(reply, []byte(v)) - } - - value := reflect.New(reflect.ValueOf(tt.value).Type().Elem()) - - if err := redis.ScanStruct(reply, value.Interface()); err != nil { - t.Fatalf("ScanStruct(%s) returned error %v", tt.title, err) - } - - if !reflect.DeepEqual(value.Interface(), tt.value) { - t.Fatalf("ScanStruct(%s) returned %v, want %v", tt.title, value.Interface(), tt.value) - } - } -} - -func TestBadScanStructArgs(t *testing.T) { - x := []interface{}{"A", "b"} - test := func(v interface{}) { - if err := redis.ScanStruct(x, v); err == nil { - t.Errorf("Expect error for ScanStruct(%T, %T)", x, v) - } - } - - test(nil) - - var v0 *struct{} - test(v0) - - var v1 int - test(&v1) - - x = x[:1] - v2 := struct{ A string }{} - test(&v2) -} - -var scanSliceTests = []struct { - src []interface{} - fieldNames []string - ok bool - dest interface{} -}{ - { - []interface{}{[]byte("1"), nil, []byte("-1")}, - nil, - true, - []int{1, 0, -1}, - }, - { - []interface{}{[]byte("1"), nil, []byte("2")}, - nil, - true, - []uint{1, 0, 2}, - }, - { - []interface{}{[]byte("-1")}, - nil, - false, - []uint{1}, - }, - { - []interface{}{[]byte("hello"), nil, []byte("world")}, - nil, - true, - [][]byte{[]byte("hello"), nil, []byte("world")}, - }, - { - []interface{}{[]byte("hello"), nil, []byte("world")}, - nil, - true, - []string{"hello", "", "world"}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - nil, - true, - []struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1")}, - nil, - false, - []struct{ A, B, C string }{{"a1", "b1", ""}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - nil, - true, - []*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - []string{"A", "B"}, - true, - []struct{ A, C, B string }{{"a1", "", "b1"}, {"a2", "", "b2"}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - nil, - false, - []struct{}{}, - }, -} - -func TestScanSlice(t *testing.T) { - for _, tt := range scanSliceTests { - - typ := reflect.ValueOf(tt.dest).Type() - dest := reflect.New(typ) - - err := redis.ScanSlice(tt.src, dest.Interface(), tt.fieldNames...) 
- if tt.ok != (err == nil) { - t.Errorf("ScanSlice(%v, []%s, %v) returned error %v", tt.src, typ, tt.fieldNames, err) - continue - } - if tt.ok && !reflect.DeepEqual(dest.Elem().Interface(), tt.dest) { - t.Errorf("ScanSlice(src, []%s) returned %#v, want %#v", typ, dest.Elem().Interface(), tt.dest) - } - } -} - -func ExampleScanSlice() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - - c.Send("HMSET", "album:1", "title", "Red", "rating", 5) - c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) - c.Send("HMSET", "album:3", "title", "Beat", "rating", 4) - c.Send("LPUSH", "albums", "1") - c.Send("LPUSH", "albums", "2") - c.Send("LPUSH", "albums", "3") - values, err := redis.Values(c.Do("SORT", "albums", - "BY", "album:*->rating", - "GET", "album:*->title", - "GET", "album:*->rating")) - if err != nil { - panic(err) - } - - var albums []struct { - Title string - Rating int - } - if err := redis.ScanSlice(values, &albums); err != nil { - panic(err) - } - fmt.Printf("%v\n", albums) - // Output: - // [{Earthbound 1} {Beat 4} {Red 5}] -} - -var argsTests = []struct { - title string - actual redis.Args - expected redis.Args -}{ - {"struct ptr", - redis.Args{}.AddFlat(&struct { - I int `redis:"i"` - U uint `redis:"u"` - S string `redis:"s"` - P []byte `redis:"p"` - Bt bool - Bf bool - }{ - -1234, 5678, "hello", []byte("world"), true, false, - }), - redis.Args{"i", int(-1234), "u", uint(5678), "s", "hello", "p", []byte("world"), "Bt", true, "Bf", false}, - }, - {"struct", - redis.Args{}.AddFlat(struct{ I int }{123}), - redis.Args{"I", 123}, - }, - {"slice", - redis.Args{}.Add(1).AddFlat([]string{"a", "b", "c"}).Add(2), - redis.Args{1, "a", "b", "c", 2}, - }, -} - -func TestArgs(t *testing.T) { - for _, tt := range argsTests { - if !reflect.DeepEqual(tt.actual, tt.expected) { - t.Fatalf("%s is %v, want %v", tt.title, tt.actual, tt.expected) - } - } -} - -func ExampleArgs() { - c, err := dial() - if err != nil { - panic(err) - } - defer c.Close() - - var p1, p2 struct { - Title string `redis:"title"` - Author string `redis:"author"` - Body string `redis:"body"` - } - - p1.Title = "Example" - p1.Author = "Gary" - p1.Body = "Hello" - - if _, err := c.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil { - panic(err) - } - - m := map[string]string{ - "title": "Example2", - "author": "Steve", - "body": "Map", - } - - if _, err := c.Do("HMSET", redis.Args{}.Add("id2").AddFlat(m)...); err != nil { - panic(err) - } - - for _, id := range []string{"id1", "id2"} { - - v, err := redis.Values(c.Do("HGETALL", id)) - if err != nil { - panic(err) - } - - if err := redis.ScanStruct(v, &p2); err != nil { - panic(err) - } - - fmt.Printf("%+v\n", p2) - } - - // Output: - // {Title:Example Author:Gary Body:Hello} - // {Title:Example2 Author:Steve Body:Map} -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go deleted file mode 100644 index 78605a90..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "strings" -) - -// Script encapsulates the source, hash and key count for a Lua script. See -// http://redis.io/commands/eval for information on scripts in Redis. -type Script struct { - keyCount int - src string - hash string -} - -// NewScript returns a new script object. If keyCount is greater than or equal -// to zero, then the count is automatically inserted in the EVAL command -// argument list. If keyCount is less than zero, then the application supplies -// the count as the first value in the keysAndArgs argument to the Do, Send and -// SendHash methods. -func NewScript(keyCount int, src string) *Script { - h := sha1.New() - io.WriteString(h, src) - return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} -} - -func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { - var args []interface{} - if s.keyCount < 0 { - args = make([]interface{}, 1+len(keysAndArgs)) - args[0] = spec - copy(args[1:], keysAndArgs) - } else { - args = make([]interface{}, 2+len(keysAndArgs)) - args[0] = spec - args[1] = s.keyCount - copy(args[2:], keysAndArgs) - } - return args -} - -// Do evaluates the script. Under the covers, Do optimistically evaluates the -// script using the EVALSHA command. If the command fails because the script is -// not loaded, then Do evaluates the script using the EVAL command (thus -// causing the script to load). -func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { - v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) - if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { - v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) - } - return v, err -} - -// SendHash evaluates the script without waiting for the reply. The script is -// evaluated with the EVALSHA command. The application must ensure that the -// script is loaded by a previous call to Send, Do or Load methods. -func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) -} - -// Send evaluates the script without waiting for the reply. -func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVAL", s.args(s.src, keysAndArgs)...) -} - -// Load loads the script without evaluating it. -func (s *Script) Load(c Conn) error { - _, err := c.Do("SCRIPT", "LOAD", s.src) - return err -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go deleted file mode 100644 index c9635bf0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis_test - -import ( - "fmt" - "reflect" - "testing" - "time" - - "github.com/garyburd/redigo/internal/redistest" - "github.com/garyburd/redigo/redis" -) - -func ExampleScript(c redis.Conn, reply interface{}, err error) { - // Initialize a package-level variable with a script. - var getScript = redis.NewScript(1, `return redis.call('get', KEYS[1])`) - - // In a function, use the script Do method to evaluate the script. The Do - // method optimistically uses the EVALSHA command. If the script is not - // loaded, then the Do method falls back to the EVAL command. - reply, err = getScript.Do(c, "foo") -} - -func TestScript(t *testing.T) { - c, err := redistest.Dial() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - // To test fall back in Do, we make script unique by adding comment with current time. - script := fmt.Sprintf("--%d\nreturn {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", time.Now().UnixNano()) - s := redis.NewScript(2, script) - reply := []interface{}{[]byte("key1"), []byte("key2"), []byte("arg1"), []byte("arg2")} - - v, err := s.Do(c, "key1", "key2", "arg1", "arg2") - if err != nil { - t.Errorf("s.Do(c, ...) returned %v", err) - } - - if !reflect.DeepEqual(v, reply) { - t.Errorf("s.Do(c, ..); = %v, want %v", v, reply) - } - - err = s.Load(c) - if err != nil { - t.Errorf("s.Load(c) returned %v", err) - } - - err = s.SendHash(c, "key1", "key2", "arg1", "arg2") - if err != nil { - t.Errorf("s.SendHash(c, ...) returned %v", err) - } - - err = c.Flush() - if err != nil { - t.Errorf("c.Flush() returned %v", err) - } - - v, err = c.Receive() - if !reflect.DeepEqual(v, reply) { - t.Errorf("s.SendHash(c, ..); c.Receive() = %v, want %v", v, reply) - } - - err = s.Send(c, "key1", "key2", "arg1", "arg2") - if err != nil { - t.Errorf("s.Send(c, ...) returned %v", err) - } - - err = c.Flush() - if err != nil { - t.Errorf("c.Flush() returned %v", err) - } - - v, err = c.Receive() - if !reflect.DeepEqual(v, reply) { - t.Errorf("s.Send(c, ..); c.Receive() = %v, want %v", v, reply) - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go deleted file mode 100644 index b959a11f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "bufio" - "net" - "time" -) - -func SetNowFunc(f func() time.Time) { - nowFunc = f -} - -type nopCloser struct{ net.Conn } - -func (nopCloser) Close() error { return nil } - -// NewConnBufio is a hook for tests. -func NewConnBufio(rw bufio.ReadWriter) Conn { - return &conn{br: rw.Reader, bw: rw.Writer, conn: nopCloser{}} -} - -var ( - ErrNegativeInt = errNegativeInt -) diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go deleted file mode 100644 index 1d86ee6c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2013 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis_test - -import ( - "fmt" - "github.com/garyburd/redigo/redis" -) - -// zpop pops a value from the ZSET key using WATCH/MULTI/EXEC commands. -func zpop(c redis.Conn, key string) (result string, err error) { - - defer func() { - // Return connection to normal state on error. - if err != nil { - c.Do("DISCARD") - } - }() - - // Loop until transaction is successful. - for { - if _, err := c.Do("WATCH", key); err != nil { - return "", err - } - - members, err := redis.Strings(c.Do("ZRANGE", key, 0, 0)) - if err != nil { - return "", err - } - if len(members) != 1 { - return "", redis.ErrNil - } - - c.Send("MULTI") - c.Send("ZREM", key, members[0]) - queued, err := c.Do("EXEC") - if err != nil { - return "", err - } - - if queued != nil { - result = members[0] - break - } - } - - return result, nil -} - -// zpopScript pops a value from a ZSET. -var zpopScript = redis.NewScript(1, ` - local r = redis.call('ZRANGE', KEYS[1], 0, 0) - if r ~= nil then - r = r[1] - redis.call('ZREM', KEYS[1], r) - end - return r -`) - -// This example implements ZPOP as described at -// http://redis.io/topics/transactions using WATCH/MULTI/EXEC and scripting. -func Example_zpop() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - // Add test data using a pipeline. - - for i, member := range []string{"red", "blue", "green"} { - c.Send("ZADD", "zset", i, member) - } - if _, err := c.Do(""); err != nil { - fmt.Println(err) - return - } - - // Pop using WATCH/MULTI/EXEC - - v, err := zpop(c, "zset") - if err != nil { - fmt.Println(err) - return - } - fmt.Println(v) - - // Pop using a script. 
- - v, err = redis.String(zpopScript.Do(c, "zset")) - if err != nil { - fmt.Println(err) - return - } - fmt.Println(v) - - // Output: - // red - // blue -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml deleted file mode 100644 index d87d4657..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb872..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. 
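[Editor's note: the gorilla/context package removed above stores per-request values in a package-level map keyed by `*http.Request`. A minimal sketch of the `Set`/`Get`/`ClearHandler` API this file documents — handler name and key type are illustrative, the calls are the ones defined in the deleted source:]

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/context"
)

// Use an unexported key type to avoid collisions with other packages.
type ctxKey int

const userKey ctxKey = 0

func handler(w http.ResponseWriter, r *http.Request) {
	// Set binds a value to this specific *http.Request.
	context.Set(r, userKey, "gopher")
	// Get retrieves it anywhere the same request is visible.
	fmt.Fprintln(w, context.Get(r, userKey))
}

func main() {
	// ClearHandler drops all values for the request when it finishes,
	// so the package-level map cannot grow without bound.
	http.ListenAndServe(":8080", context.ClearHandler(http.HandlerFunc(handler)))
}
```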
- -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. 
-func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context_test.go deleted file mode 100644 index 6ada8ec3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // GetAll() - values := GetAll(r) - assertEqual(len(values), 3) - - // GetAll() for empty request - values = GetAll(emptyR) - if values != nil { - t.Error("GetAll didn't return nil value for invalid request") - } - - // GetAllOk() - values, ok = GetAllOk(r) - assertEqual(len(values), 3) - assertEqual(ok, true) - - // GetAllOk() for empty request - values, ok = GetAllOk(emptyR) - assertEqual(value, nil) - assertEqual(ok, false) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} - -func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { - - b.StopTimer() - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - done := make(chan struct{}) - b.StartTimer() - - for i := 0; i < b.N; i++ { - wait := make(chan struct{}) - - for i := 0; i < numReaders; i++ { - go parallelReader(r, "test", 
iterations, wait, done) - } - - for i := 0; i < numWriters; i++ { - go parallelWriter(r, "test", "123", iterations, wait, done) - } - - close(wait) - - for i := 0; i < numReaders+numWriters; i++ { - <-done - } - - } - -} - -func BenchmarkMutexSameReadWrite1(b *testing.B) { - benchmarkMutex(b, 1, 1, 32) -} -func BenchmarkMutexSameReadWrite2(b *testing.B) { - benchmarkMutex(b, 2, 2, 32) -} -func BenchmarkMutexSameReadWrite4(b *testing.B) { - benchmarkMutex(b, 4, 4, 32) -} -func BenchmarkMutex1(b *testing.B) { - benchmarkMutex(b, 2, 8, 32) -} -func BenchmarkMutex2(b *testing.B) { - benchmarkMutex(b, 16, 4, 64) -} -func BenchmarkMutex3(b *testing.B) { - benchmarkMutex(b, 1, 2, 128) -} -func BenchmarkMutex4(b *testing.B) { - benchmarkMutex(b, 128, 32, 256) -} -func BenchmarkMutex5(b *testing.B) { - benchmarkMutex(b, 1024, 2048, 64) -} -func BenchmarkMutex6(b *testing.B) { - benchmarkMutex(b, 2048, 1024, 512) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c74003..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. 
Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml deleted file mode 100644 index 354b7f8b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/README.md deleted file mode 100644 index a340abe0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/README.md +++ /dev/null @@ -1,52 +0,0 @@ -gorilla/handlers -================ -[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers) - -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's `net/http` package (or any framework supporting `http.Handler`), including: - -* `LoggingHandler` for logging HTTP requests in the Apache [Common Log - Format](http://httpd.apache.org/docs/2.2/logs.html#common). -* `CombinedLoggingHandler` for logging HTTP requests in the Apache [Combined Log - Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by - both Apache and nginx. -* `CompressHandler` for gzipping responses. -* `ContentTypeHandler` for validating requests against a list of accepted - content types. -* `MethodHandler` for matching HTTP methods against handlers in a - `map[string]http.Handler` -* `ProxyHeaders` for populating `r.RemoteAddr` and `r.URL.Scheme` based on the - `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` - headers when running a Go server behind a HTTP reverse proxy. -* `CanonicalHost` for re-directing to the preferred host when handling multiple - domains (i.e. multiple CNAME aliases). - -Other handlers are documented [on the Gorilla -website](http://www.gorillatoolkit.org/pkg/handlers). - -## Example - -A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: - -```go -import ( - "net/http" - "github.com/gorilla/handlers" -) - -func main() { - r := http.NewServeMux() - - // Only log requests to our admin dashboard to stdout - r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) - r.HandleFunc("/", ShowIndex) - - // Wrap our server with our gzip handler to gzip compress all responses. - http.ListenAndServe(":8000", handlers.CompressHandler(r)) -} -``` - -## License - -BSD licensed. See the included LICENSE file for details. 
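[Editor's note: two of the handlers listed in the README above, `MethodHandler` and `ContentTypeHandler`, compose just like the `LoggingHandler`/`CompressHandler` example it gives. A minimal sketch under the signatures defined in the deleted source — route, port, and handler body are illustrative:]

```go
package main

import (
	"net/http"

	"github.com/gorilla/handlers"
)

func createWidget(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusCreated)
}

func main() {
	// MethodHandler is a map[string]http.Handler: it dispatches on the
	// request method and answers unsupported methods with a 405 plus an
	// Allow header listing the registered methods.
	api := handlers.MethodHandler{
		"POST": http.HandlerFunc(createWidget),
	}

	// ContentTypeHandler rejects PUT/POST/PATCH requests whose
	// Content-Type is not in the allowed list with a 415.
	http.Handle("/widgets", handlers.ContentTypeHandler(api, "application/json"))
	http.ListenAndServe(":8000", nil)
}
```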
- diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go deleted file mode 100644 index 3d90e191..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "compress/flate" - "compress/gzip" - "io" - "net/http" - "strings" -) - -type compressResponseWriter struct { - io.Writer - http.ResponseWriter - http.Hijacker -} - -func (w *compressResponseWriter) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *compressResponseWriter) Write(b []byte) (int, error) { - h := w.ResponseWriter.Header() - if h.Get("Content-Type") == "" { - h.Set("Content-Type", http.DetectContentType(b)) - } - - return w.Writer.Write(b) -} - -// CompressHandler gzip compresses HTTP responses for clients that support it -// via the 'Accept-Encoding' header. -func CompressHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - L: - for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") { - switch strings.TrimSpace(enc) { - case "gzip": - w.Header().Set("Content-Encoding", "gzip") - w.Header().Add("Vary", "Accept-Encoding") - - gw := gzip.NewWriter(w) - defer gw.Close() - - h, hok := w.(http.Hijacker) - if !hok { /* w is not Hijacker... oh well... */ - h = nil - } - - w = &compressResponseWriter{ - Writer: gw, - ResponseWriter: w, - Hijacker: h, - } - - break L - case "deflate": - w.Header().Set("Content-Encoding", "deflate") - w.Header().Add("Vary", "Accept-Encoding") - - fw, _ := flate.NewWriter(w, flate.DefaultCompression) - defer fw.Close() - - h, hok := w.(http.Hijacker) - if !hok { /* w is not Hijacker... oh well... */ - h = nil - } - - w = &compressResponseWriter{ - Writer: fw, - ResponseWriter: w, - Hijacker: h, - } - - break L - } - } - - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go deleted file mode 100644 index 2661b399..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package handlers - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" -) - -func compressedRequest(w *httptest.ResponseRecorder, compression string) { - CompressHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - for i := 0; i < 1024; i++ { - io.WriteString(w, "Gorilla!\n") - } - })).ServeHTTP(w, &http.Request{ - Method: "GET", - Header: http.Header{ - "Accept-Encoding": []string{compression}, - }, - }) - -} - -func TestCompressHandlerGzip(t *testing.T) { - w := httptest.NewRecorder() - compressedRequest(w, "gzip") - if w.HeaderMap.Get("Content-Encoding") != "gzip" { - t.Fatalf("wrong content encoding, got %d want %d", w.HeaderMap.Get("Content-Encoding"), "gzip") - } - if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" { - t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8") - } - if w.Body.Len() != 72 { - t.Fatalf("wrong len, got %d want %d", w.Body.Len(), 72) - } -} - -func TestCompressHandlerDeflate(t *testing.T) { - w := httptest.NewRecorder() - compressedRequest(w, "deflate") - if w.HeaderMap.Get("Content-Encoding") != "deflate" { - t.Fatalf("wrong content encoding, got %d want %d", w.HeaderMap.Get("Content-Encoding"), "deflate") - } - if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" { - t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8") - } - if w.Body.Len() != 54 { - t.Fatalf("wrong len, got %d want %d", w.Body.Len(), 54) - } -} - -func TestCompressHandlerGzipDeflate(t *testing.T) { - w := httptest.NewRecorder() - compressedRequest(w, "gzip, deflate ") - if w.HeaderMap.Get("Content-Encoding") != "gzip" { - t.Fatalf("wrong content encoding, got %s want %s", w.HeaderMap.Get("Content-Encoding"), "gzip") - } - if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" { - t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go deleted file mode 100644 index c3c20e5b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "bufio" - "fmt" - "io" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// MethodHandler is an http.Handler that dispatches to a handler whose key in the MethodHandler's -// map matches the name of the HTTP request's method, eg: GET -// -// If the request's method is OPTIONS and OPTIONS is not a key in the map then the handler -// responds with a status of 200 and sets the Allow header to a comma-separated list of -// available methods. -// -// If the request's method doesn't match any of its keys the handler responds with -// a status of 405, Method not allowed and sets the Allow header to a comma-separated list -// of available methods. 
-type MethodHandler map[string]http.Handler - -func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if handler, ok := h[req.Method]; ok { - handler.ServeHTTP(w, req) - } else { - allow := []string{} - for k := range h { - allow = append(allow, k) - } - sort.Strings(allow) - w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - } else { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } - } -} - -// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends -type loggingHandler struct { - writer io.Writer - handler http.Handler -} - -// combinedLoggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends -type combinedLoggingHandler struct { - writer io.Writer - handler http.Handler -} - -func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - t := time.Now() - logger := makeLogger(w) - url := *req.URL - h.handler.ServeHTTP(logger, req) - writeLog(h.writer, req, url, t, logger.Status(), logger.Size()) -} - -func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - t := time.Now() - logger := makeLogger(w) - url := *req.URL - h.handler.ServeHTTP(logger, req) - writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size()) -} - -func makeLogger(w http.ResponseWriter) loggingResponseWriter { - var logger loggingResponseWriter = &responseLogger{w: w} - if _, ok := w.(http.Hijacker); ok { - logger = &hijackLogger{responseLogger{w: w}} - } - h, ok1 := logger.(http.Hijacker) - c, ok2 := w.(http.CloseNotifier) - if ok1 && ok2 { - return hijackCloseNotifier{logger, h, c} - } - if ok2 { - return &closeNotifyWriter{logger, c} - } - return logger -} - -type loggingResponseWriter interface { - http.ResponseWriter - http.Flusher - Status() int - Size() int -} - -// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status -// code and body size -type responseLogger struct { - w http.ResponseWriter - status int - size int -} - -func (l *responseLogger) Header() http.Header { - return l.w.Header() -} - -func (l *responseLogger) Write(b []byte) (int, error) { - if l.status == 0 { - // The status will be StatusOK if WriteHeader has not been called yet - l.status = http.StatusOK - } - size, err := l.w.Write(b) - l.size += size - return size, err -} - -func (l *responseLogger) WriteHeader(s int) { - l.w.WriteHeader(s) - l.status = s -} - -func (l *responseLogger) Status() int { - return l.status -} - -func (l *responseLogger) Size() int { - return l.size -} - -func (l *responseLogger) Flush() { - f, ok := l.w.(http.Flusher) - if ok { - f.Flush() - } -} - -type hijackLogger struct { - responseLogger -} - -func (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h := l.responseLogger.w.(http.Hijacker) - conn, rw, err := h.Hijack() - if err == nil && l.responseLogger.status == 0 { - // The status will be StatusSwitchingProtocols if there was no error and WriteHeader has not been called yet - l.responseLogger.status = http.StatusSwitchingProtocols - } - return conn, rw, err -} - -type closeNotifyWriter struct { - loggingResponseWriter - http.CloseNotifier -} - -type hijackCloseNotifier struct { - loggingResponseWriter - http.Hijacker - http.CloseNotifier -} - -const lowerhex = "0123456789abcdef" - -func appendQuoted(buf []byte, s string) []byte { - var runeTmp [utf8.UTFMax]byte - for width := 0; len(s) > 0; s = s[width:] { - r := rune(s[0]) - width = 1 
- if r >= utf8.RuneSelf { - r, width = utf8.DecodeRuneInString(s) - } - if width == 1 && r == utf8.RuneError { - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - continue - } - if r == rune('"') || r == '\\' { // always backslashed - buf = append(buf, '\\') - buf = append(buf, byte(r)) - continue - } - if strconv.IsPrint(r) { - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - continue - } - switch r { - case '\a': - buf = append(buf, `\a`...) - case '\b': - buf = append(buf, `\b`...) - case '\f': - buf = append(buf, `\f`...) - case '\n': - buf = append(buf, `\n`...) - case '\r': - buf = append(buf, `\r`...) - case '\t': - buf = append(buf, `\t`...) - case '\v': - buf = append(buf, `\v`...) - default: - switch { - case r < ' ': - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - case r > utf8.MaxRune: - r = 0xFFFD - fallthrough - case r < 0x10000: - buf = append(buf, `\u`...) - for s := 12; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - default: - buf = append(buf, `\U`...) - for s := 28; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - } - } - } - return buf - -} - -// buildCommonLogLine builds a log entry for req in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { - username := "-" - if url.User != nil { - if name := url.User.Username(); name != "" { - username = name - } - } - - host, _, err := net.SplitHostPort(req.RemoteAddr) - - if err != nil { - host = req.RemoteAddr - } - - uri := url.RequestURI() - - buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) - buf = append(buf, host...) - buf = append(buf, " - "...) - buf = append(buf, username...) - buf = append(buf, " ["...) - buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) - buf = append(buf, `] "`...) - buf = append(buf, req.Method...) - buf = append(buf, " "...) - buf = appendQuoted(buf, uri) - buf = append(buf, " "...) - buf = append(buf, req.Proto...) - buf = append(buf, `" `...) - buf = append(buf, strconv.Itoa(status)...) - buf = append(buf, " "...) - buf = append(buf, strconv.Itoa(size)...) - return buf -} - -// writeLog writes a log entry for req to w in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { - buf := buildCommonLogLine(req, url, ts, status, size) - buf = append(buf, '\n') - w.Write(buf) -} - -// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { - buf := buildCommonLogLine(req, url, ts, status, size) - buf = append(buf, ` "`...) - buf = appendQuoted(buf, req.Referer()) - buf = append(buf, `" "`...) 
- buf = appendQuoted(buf, req.UserAgent()) - buf = append(buf, '"', '\n') - w.Write(buf) -} - -// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Combined Log Format. -// -// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. -// -// LoggingHandler always sets the ident field of the log to - -func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { - return combinedLoggingHandler{out, h} -} - -// LoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Common Log Format (CLF). -// -// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. -// -// LoggingHandler always sets the ident field of the log to - -func LoggingHandler(out io.Writer, h http.Handler) http.Handler { - return loggingHandler{out, h} -} - -// isContentType validates the Content-Type header -// is contentType. That is, its type and subtype match. -func isContentType(h http.Header, contentType string) bool { - ct := h.Get("Content-Type") - if i := strings.IndexRune(ct, ';'); i != -1 { - ct = ct[0:i] - } - return ct == contentType -} - -// ContentTypeHandler wraps and returns a http.Handler, validating the request content type -// is acompatible with the contentTypes list. -// It writes a HTTP 415 error if that fails. -// -// Only PUT, POST, and PATCH requests are considered. -func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { - h.ServeHTTP(w, r) - return - } - - for _, ct := range contentTypes { - if isContentType(r.Header, ct) { - h.ServeHTTP(w, r) - return - } - } - http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) - }) -} - -const ( - // HTTPMethodOverrideHeader is a commonly used - // http header to override a request method. - HTTPMethodOverrideHeader = "X-HTTP-Method-Override" - // HTTPMethodOverrideFormKey is a commonly used - // HTML form key to override a request method. - HTTPMethodOverrideFormKey = "_method" -) - -// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for the X-HTTP-Method-Override header -// or the _method form key, and overrides (if valid) request.Method with its value. -// -// This is especially useful for http clients that don't support many http verbs. -// It isn't secure to override e.g a GET to a POST, so only POST requests are considered. -// Likewise, the override method can only be a "write" method: PUT, PATCH or DELETE. -// -// Form method takes precedence over header method. 
-func HTTPMethodOverrideHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - om := r.FormValue(HTTPMethodOverrideFormKey) - if om == "" { - om = r.Header.Get(HTTPMethodOverrideHeader) - } - if om == "PUT" || om == "PATCH" || om == "DELETE" { - r.Method = om - } - } - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go deleted file mode 100644 index 94eeb035..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "bytes" - "net" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" -) - -const ( - ok = "ok\n" - notAllowed = "Method not allowed\n" -) - -var okHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte(ok)) -}) - -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} - -func TestMethodHandler(t *testing.T) { - tests := []struct { - req *http.Request - handler http.Handler - code int - allow string // Contents of the Allow header - body string - }{ - // No handlers - {newRequest("GET", "/foo"), MethodHandler{}, http.StatusMethodNotAllowed, "", notAllowed}, - {newRequest("OPTIONS", "/foo"), MethodHandler{}, http.StatusOK, "", ""}, - - // A single handler - {newRequest("GET", "/foo"), MethodHandler{"GET": okHandler}, http.StatusOK, "", ok}, - {newRequest("POST", "/foo"), MethodHandler{"GET": okHandler}, http.StatusMethodNotAllowed, "GET", notAllowed}, - - // Multiple handlers - {newRequest("GET", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok}, - {newRequest("POST", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok}, - {newRequest("DELETE", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusMethodNotAllowed, "GET, POST", notAllowed}, - {newRequest("OPTIONS", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "GET, POST", ""}, - - // Override OPTIONS - {newRequest("OPTIONS", "/foo"), MethodHandler{"OPTIONS": okHandler}, http.StatusOK, "", ok}, - } - - for i, test := range tests { - rec := httptest.NewRecorder() - test.handler.ServeHTTP(rec, test.req) - if rec.Code != test.code { - t.Fatalf("%d: wrong code, got %d want %d", i, rec.Code, test.code) - } - if allow := rec.HeaderMap.Get("Allow"); allow != test.allow { - t.Fatalf("%d: wrong Allow, got %s want %s", i, allow, test.allow) - } - if body := rec.Body.String(); body != test.body { - t.Fatalf("%d: wrong body, got %q want %q", i, body, test.body) - } - } -} - -func TestWriteLog(t *testing.T) { - loc, err := time.LoadLocation("Europe/Warsaw") - if err != nil { - panic(err) - } - ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc) - - // A typical request with an OK response - req := newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - - buf := new(bytes.Buffer) - writeLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log := buf.String() - - expected := "192.168.100.5 - 
- [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Request with an unauthorized user - req = newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - req.URL.User = url.User("kamil") - - buf.Reset() - writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500) - log = buf.String() - - expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Request with url encoded parameters - req = newRequest("GET", "http://example.com/test?abc=hello%20world&a=b%3F") - req.RemoteAddr = "192.168.100.5" - - buf.Reset() - writeLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log = buf.String() - - expected = "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET /test?abc=hello%20world&a=b%3F HTTP/1.1\" 200 100\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } -} - -func TestWriteCombinedLog(t *testing.T) { - loc, err := time.LoadLocation("Europe/Warsaw") - if err != nil { - panic(err) - } - ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc) - - // A typical request with an OK response - req := newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - req.Header.Set("Referer", "http://example.com") - req.Header.Set( - "User-Agent", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.33 "+ - "(KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33", - ) - - buf := new(bytes.Buffer) - writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log := buf.String() - - expected := "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Request with an unauthorized user - req.URL.User = url.User("kamil") - - buf.Reset() - writeCombinedLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500) - log = buf.String() - - expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Test with remote ipv6 address - req.RemoteAddr = "::1" - - buf.Reset() - writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log = buf.String() - - expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Test remote ipv6 addr, with port - req.RemoteAddr = net.JoinHostPort("::1", "65000") - - buf.Reset() - writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log = buf.String() - - expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } -} 
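[Editor's note: the two tests above pin down the exact Apache Common and Combined Log Format lines these writers emit. A minimal sketch of getting the same combined-format output through the package's public API — the address and handler are illustrative, `CombinedLoggingHandler` is the exported constructor defined earlier in this file:]

```go
package main

import (
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Each request is written to stdout as one combined-format line:
	// host - user [02/Jan/2006:15:04:05 -0700] "METHOD uri PROTO" status size "referer" "user-agent"
	http.ListenAndServe(":8000", handlers.CombinedLoggingHandler(os.Stdout, h))
}
```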
- -func TestLogPathRewrites(t *testing.T) { - var buf bytes.Buffer - - handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - req.URL.Path = "/" // simulate http.StripPrefix and friends - w.WriteHeader(200) - }) - logger := LoggingHandler(&buf, handler) - - logger.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/subdir/asdf")) - - if !strings.Contains(buf.String(), "GET /subdir/asdf HTTP") { - t.Fatalf("Got log %#v, wanted substring %#v", buf.String(), "GET /subdir/asdf HTTP") - } -} - -func BenchmarkWriteLog(b *testing.B) { - loc, err := time.LoadLocation("Europe/Warsaw") - if err != nil { - b.Fatalf(err.Error()) - } - ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc) - - req := newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - - b.ResetTimer() - - buf := &bytes.Buffer{} - for i := 0; i < b.N; i++ { - buf.Reset() - writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500) - } -} - -func TestContentTypeHandler(t *testing.T) { - tests := []struct { - Method string - AllowContentTypes []string - ContentType string - Code int - }{ - {"POST", []string{"application/json"}, "application/json", http.StatusOK}, - {"POST", []string{"application/json", "application/xml"}, "application/json", http.StatusOK}, - {"POST", []string{"application/json"}, "application/json; charset=utf-8", http.StatusOK}, - {"POST", []string{"application/json"}, "application/json+xxx", http.StatusUnsupportedMediaType}, - {"POST", []string{"application/json"}, "text/plain", http.StatusUnsupportedMediaType}, - {"GET", []string{"application/json"}, "", http.StatusOK}, - {"GET", []string{}, "", http.StatusOK}, - } - for _, test := range tests { - r, err := http.NewRequest(test.Method, "/", nil) - if err != nil { - t.Error(err) - continue - } - - h := ContentTypeHandler(okHandler, test.AllowContentTypes...) 
- r.Header.Set("Content-Type", test.ContentType) - w := httptest.NewRecorder() - h.ServeHTTP(w, r) - if w.Code != test.Code { - t.Errorf("expected %d, got %d", test.Code, w.Code) - } - } -} - -func TestHTTPMethodOverride(t *testing.T) { - var tests = []struct { - Method string - OverrideMethod string - ExpectedMethod string - }{ - {"POST", "PUT", "PUT"}, - {"POST", "PATCH", "PATCH"}, - {"POST", "DELETE", "DELETE"}, - {"PUT", "DELETE", "PUT"}, - {"GET", "GET", "GET"}, - {"HEAD", "HEAD", "HEAD"}, - {"GET", "PUT", "GET"}, - {"HEAD", "DELETE", "HEAD"}, - } - - for _, test := range tests { - h := HTTPMethodOverrideHandler(okHandler) - reqs := make([]*http.Request, 0, 2) - - rHeader, err := http.NewRequest(test.Method, "/", nil) - if err != nil { - t.Error(err) - } - rHeader.Header.Set(HTTPMethodOverrideHeader, test.OverrideMethod) - reqs = append(reqs, rHeader) - - f := url.Values{HTTPMethodOverrideFormKey: []string{test.OverrideMethod}} - rForm, err := http.NewRequest(test.Method, "/", strings.NewReader(f.Encode())) - if err != nil { - t.Error(err) - } - rForm.Header.Set("Content-Type", "application/x-www-form-urlencoded") - reqs = append(reqs, rForm) - - for _, r := range reqs { - w := httptest.NewRecorder() - h.ServeHTTP(w, r) - if r.Method != test.ExpectedMethod { - t.Errorf("Expected %s, got %s", test.ExpectedMethod, r.Method) - } - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d87d4657..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb872..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/README.md deleted file mode 100644 index e60301b0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,7 +0,0 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -gorilla/mux is a powerful URL router and dispatcher. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index c5f97b2b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "net/http" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/doc.go deleted file mode 100644 index b2deed34..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. 
- * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register their -paths relative to a given subrouter. - -There's one more thing about subroutes.
When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -*/ -package mux diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux.go deleted file mode 100644 index 5b5f8e7d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "path" - - "github.com/gorilla/context" -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. 
-// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in an init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. - namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request. - KeepContext bool -} - -// Match matches registered routes against the request. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - return true - } - } - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from the query. - // This matches the fix in Go 1.2 rc4 for the same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) - } - if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } - } - if !r.KeepContext { - defer context.Clear(req) - } - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting.
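The StrictSlash contract documented above is easiest to see end to end. A minimal sketch of the intended behavior, assuming the usual gorilla/mux import and a hypothetical fooHandler (neither is taken from the vendored file):

    r := mux.NewRouter()
    r.StrictSlash(true)
    // Registered with a trailing slash...
    r.HandleFunc("/products/", fooHandler)
    // ...so "GET /products" should be answered with a 301 redirect to
    // "/products/", and fooHandler always sees the path as registered.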
-func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} - r.routes = append(r.routes, route) - return route -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. 
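For context on how a match's variables reach user code: handlers read them back with mux.Vars(), which returns the Vars map stored on the RouteMatch during ServeHTTP. A hedged sketch (ArticleHandler is an assumed name, not part of the vendored file):

    func ArticleHandler(w http.ResponseWriter, r *http.Request) {
        vars := mux.Vars(r) // populated from RouteMatch.Vars by the router
        fmt.Fprintf(w, "category=%s id=%s", vars["category"], vars["id"])
    }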
-type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairs(pairs ...string) (map[string]string, error) { - length := len(pairs) - if length%2 != 0 { - return nil, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMap returns true if the given key/value pairs exist in a given map. -func matchMap(toCheck map[string]string, toMatch map[string][]string, - canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index e455bce8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,943 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
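Two of the helpers deleted above have semantics worth pinning down; the expected values below are read off the code, not observed output:

    // mapFromPairs requires an even-length argument list:
    m, err := mapFromPairs("category", "technology", "id", "42")
    // m == map[string]string{"category": "technology", "id": "42"}, err == nil

    // cleanPath canonicalizes but restores a meaningful trailing slash:
    cleanPath("/a/b/../c/") // "/a/c/"
    cleanPath("")           // "/"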
- -package mux - -import ( - "fmt" - "net/http" - "testing" - - "github.com/gorilla/context" -) - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", 
"http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", - route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: false, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - route: new(Route).PathPrefix("/1"), - request: newRequest("GET", 
"http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: 
newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } - -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v1": "bar"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with multiple 
patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, match https", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "https://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "ftp://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m *RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - 
r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if route.regexp.path != nil { - path = route.regexp.path.template - } - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - host := test.host - path := test.path - url := test.host + test.path - shouldRedirect := test.shouldRedirect - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if host != "" { - u, _ := test.route.URLHost(mapToPairs(match.Vars)...) 
- if host != u.Host { - t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) - return - } - } - if path != "" { - u, _ := route.URLPath(mapToPairs(match.Vars)...) - if path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) - return - } - } - if url != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) - if url != u.Host+u.Path { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m 
map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true -} - -// newRequest is a helper function to create a new request with a method and url -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 1f7c190c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,714 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). 
- PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. - scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. - scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - // Need better messages. :) - if matched { - t.Errorf("Should match.") - } else { - t.Errorf("Should not match.") - } - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. - reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. 
- reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = []urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: 
new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := 
v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtUrl := u.String() - // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. 
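The strict-slash redirect asserted by TestRedirectSlash above boils down to the following usage (a minimal sketch; the upstream import path and the server address are assumptions, not part of this patch):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	// With StrictSlash(true), a request for "/products" is answered with a
	// 301 redirect to "/products/" (and vice versa), which is exactly what
	// TestRedirectSlash asserts via the Location header.
	r := mux.NewRouter()
	r.StrictSlash(true)
	r.HandleFunc("/products/", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, "products")
	})
	log.Fatal(http.ListenAndServe(":8080", r))
}
```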
-func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false, false) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go deleted file mode 100644 index a6305483..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if matchQuery { - defaultPattern = "[^?&]+" - matchPrefix = true - } else if matchHost { - defaultPattern = "[^.]+" - matchPrefix = false - } - // Only match strict slash if not matching - if matchPrefix || matchHost || matchQuery { - strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - if !matchQuery { - pattern.WriteByte('^') - } - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. 
- raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if strictSlash { - pattern.WriteString("[/]?") - } - if !matchPrefix { - pattern.WriteByte('$') - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - // Done! - return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // True for host match, false for path or query string match. - matchHost bool - // True for query string match, false for path and host match. - matchQuery bool - // The strictSlash value defined on the route, but disabled if PathPrefix was used. - strictSlash bool - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - if r.matchQuery { - return r.regexp.MatchString(req.URL.RawQuery) - } else { - return r.regexp.MatchString(req.URL.Path) - } - } - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(pairs ...string) (string, error) { - values, err := mapFromPairs(pairs...) - if err != nil { - return "", err - } - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. 
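The url helper above validates built values against each variable's pattern before returning; a sketch of how that surfaces through Route.URL (hypothetical route and values, assuming the upstream import path):

```go
package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.NewRoute().Path("/articles/{id:[0-9]+}").Name("article")

	// A value that violates the {id:[0-9]+} pattern is rejected by the
	// per-variable validation in routeRegexp.url.
	if _, err := r.Get("article").URL("id", "forty-two"); err != nil {
		fmt.Println(err)
	}

	// A conforming value is substituted into the reverse template.
	u, err := r.Get("article").URL("id", "42")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // /articles/42
}
```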
-func braceIndices(s string) ([]int, error) { - var level, idx int - idxs := make([]int, 0) - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } - } - } - // Store path variables. - if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } - // Check if we should redirect. - if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } - // Store query string variables. - rawQuery := req.URL.RawQuery - for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(rawQuery) - if queryVars != nil { - for k, v := range q.varsN { - m.Vars[v] = queryVars[k+1] - } - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/route.go deleted file mode 100644 index c310e66b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. 
- err error -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - return false - } - } - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulting from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.getNamedRoutes()[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. 
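BuildOnly and Name, documented above, combine into routes that exist purely for URL generation; a minimal sketch under the same assumptions (the host and file name are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// A BuildOnly route never matches a request; registering it under a
	// name keeps it available purely for URL generation.
	r.NewRoute().
		Host("static.example.com").
		Path("/assets/{file}").
		BuildOnly().
		Name("asset")

	u, err := r.Get("asset").URL("file", "logo.png")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // http://static.example.com/assets/logo.png
}
```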
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { - if r.err != nil { - return r.err - } - r.regexp = r.getRegexpGroup() - if !matchHost && !matchQuery { - if len(tpl) == 0 || tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if matchHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if matchQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairs(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.domain.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// by calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false, false) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. 
-// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// by calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false, false) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, true, false) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined query -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". 
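Path, Methods and Queries, documented above, can be stacked on one route, and all of them must pass for Route.Match to succeed; a minimal sketch (hypothetical handler, assuming the upstream import path):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Every matcher added to the route must pass, so this only matches
	// GET /articles/<category>/<numeric id>?format=<something>.
	r.NewRoute().
		Path("/articles/{category}/{id:[0-9]+}").
		Methods("GET").
		Queries("format", "{format}").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			vars := mux.Vars(req)
			fmt.Fprintf(w, "%s #%s as %s\n", vars["category"], vars["id"], vars["format"])
		})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```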
-func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return a url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } - var scheme, host, path string - var err error - if r.regexp.host != nil { - // Set a default scheme. - scheme = "http" - if host, err = r.regexp.host.url(pairs...); err != nil { - return nil, err - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(pairs...); err != nil { - return nil, err - } - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - host, err := r.regexp.host.url(pairs...) - if err != nil { - return nil, err - } - return &url.URL{ - Scheme: "http", - Host: host, - }, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - path, err := r.regexp.path.url(pairs...) 
- if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml deleted file mode 100644 index 7f3fe9a9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.4 - -script: - - go test diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 659d6885..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index aa91f76c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,151 +0,0 @@ -package mapstructure - -import ( - "errors" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. 
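The two-pass pattern described in the mapstructure README above, sketched end to end; the JSON literal and Person type follow the README's own example, while the encoding/json wiring is an assumption:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

func main() {
	// First pass: decode into a generic map so the "type" key can be read.
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)
	var m map[string]interface{}
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// Second pass: pick the target struct based on "type" and let
	// mapstructure decode the rest (field matching is case-insensitive).
	switch m["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(m, &p); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", p) // {Name:Mitchell}
	}
}
```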
-func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { - // Build our arguments that reflect expects - argVals := make([]reflect.Value, 3) - argVals[0] = reflect.ValueOf(from) - argVals[1] = reflect.ValueOf(to) - argVals[2] = reflect.ValueOf(data) - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from, to, data) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - var err error - for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) - if err != nil { - return nil, err - } - - // Modify the from kind to be correct with the new data - f = reflect.ValueOf(data).Type() - } - - return data, nil - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } else { - return "0", nil - } - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go deleted file mode 100644 index 53289afc..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package mapstructure - -import ( - "errors" - "reflect" - "testing" - "time" -) - -func TestComposeDecodeHookFunc(t *testing.T) { - f1 := func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - return data.(string) + "foo", nil - } - - 
f2 := func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - return data.(string) + "bar", nil - } - - f := ComposeDecodeHookFunc(f1, f2) - - result, err := DecodeHookExec( - f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") - if err != nil { - t.Fatalf("bad: %s", err) - } - if result.(string) != "foobar" { - t.Fatalf("bad: %#v", result) - } -} - -func TestComposeDecodeHookFunc_err(t *testing.T) { - f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { - return nil, errors.New("foo") - } - - f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { - panic("NOPE") - } - - f := ComposeDecodeHookFunc(f1, f2) - - _, err := DecodeHookExec( - f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42) - if err.Error() != "foo" { - t.Fatalf("bad: %s", err) - } -} - -func TestComposeDecodeHookFunc_kinds(t *testing.T) { - var f2From reflect.Kind - - f1 := func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - return int(42), nil - } - - f2 := func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - f2From = f - return data, nil - } - - f := ComposeDecodeHookFunc(f1, f2) - - _, err := DecodeHookExec( - f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") - if err != nil { - t.Fatalf("bad: %s", err) - } - if f2From != reflect.Int { - t.Fatalf("bad: %#v", f2From) - } -} - -func TestStringToSliceHookFunc(t *testing.T) { - f := StringToSliceHookFunc(",") - - strType := reflect.TypeOf("") - sliceType := reflect.TypeOf([]byte("")) - cases := []struct { - f, t reflect.Type - data interface{} - result interface{} - err bool - }{ - {sliceType, sliceType, 42, 42, false}, - {strType, strType, 42, 42, false}, - { - strType, - sliceType, - "foo,bar,baz", - []string{"foo", "bar", "baz"}, - false, - }, - { - strType, - sliceType, - "", - []string{}, - false, - }, - } - - for i, tc := range cases { - actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) - if tc.err != (err != nil) { - t.Fatalf("case %d: expected err %#v", i, tc.err) - } - if !reflect.DeepEqual(actual, tc.result) { - t.Fatalf( - "case %d: expected %#v, got %#v", - i, tc.result, actual) - } - } -} - -func TestStringToTimeDurationHookFunc(t *testing.T) { - f := StringToTimeDurationHookFunc() - - strType := reflect.TypeOf("") - timeType := reflect.TypeOf(time.Duration(5)) - cases := []struct { - f, t reflect.Type - data interface{} - result interface{} - err bool - }{ - {strType, timeType, "5s", 5 * time.Second, false}, - {strType, timeType, "5", time.Duration(0), true}, - {strType, strType, "5", "5", false}, - } - - for i, tc := range cases { - actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) - if tc.err != (err != nil) { - t.Fatalf("case %d: expected err %#v", i, tc.err) - } - if !reflect.DeepEqual(actual, tc.result) { - t.Fatalf( - "case %d: expected %#v, got %#v", - i, tc.result, actual) - } - } -} - -func TestWeaklyTypedHook(t *testing.T) { - var f DecodeHookFunc = WeaklyTypedHook - - boolType := reflect.TypeOf(true) - strType := reflect.TypeOf("") - sliceType := reflect.TypeOf([]byte("")) - cases := []struct { - f, t reflect.Type - data interface{} - result interface{} - err bool - }{ - // TO STRING - { - boolType, - strType, - false, - "0", - false, - }, - - { - boolType, - strType, - true, - "1", - false, - }, - - { - reflect.TypeOf(float32(1)), - strType, - float32(7), - "7", - false, - }, - - { - reflect.TypeOf(int(1)), - strType, - int(7), - "7", - false, - }, - - { - sliceType, - strType, - 
[]uint8("foo"), - "foo", - false, - }, - - { - reflect.TypeOf(uint(1)), - strType, - uint(7), - "7", - false, - }, - } - - for i, tc := range cases { - actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) - if tc.err != (err != nil) { - t.Fatalf("case %d: expected err %#v", i, tc.err) - } - if !reflect.DeepEqual(actual, tc.result) { - t.Fatalf( - "case %d: expected %#v, got %#v", - i, tc.result, actual) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represent multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index d3cb4e8f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,746 +0,0 @@ -// The mapstructure package exposes functionality to convert an -// arbitrary map[string]interface{} into a native Go structure. -// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -package mapstructure - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. 
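A custom hook written against the Kind-based signature declared just below, wired in through DecoderConfig; a minimal sketch (the Config type and the comma convention are hypothetical, and the same effect is available via the StringToSliceHookFunc shown earlier):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Tags []string
}

func main() {
	// A Kind-based hook, matching the DecodeHookFuncKind signature: split
	// comma-separated strings before they are decoded into a slice.
	hook := func(f reflect.Kind, t reflect.Kind, data interface{}) (interface{}, error) {
		if f == reflect.String && t == reflect.Slice {
			return strings.Split(data.(string), ","), nil
		}
		return data, nil
	}

	var result Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: hook,
		Result:     &result,
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(map[string]interface{}{"tags": "a,b,c"}); err != nil {
		panic(err)
	}
	fmt.Println(result.Tags) // [a b c]
}
```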
-type DecodeHookFunc interface{} - -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. - // - // If an error is returned, the entire decode will fail with that - // error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). - ErrorUnused bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - WeaklyTypedInput bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string -} - -// Decode takes a map and uses reflection to convert it into the -// given Go native structure. val must be a pointer to a struct. -func Decode(m interface{}, rawVal interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: rawVal, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. 
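WeakDecode, documented above, applies the listed weak conversions; a minimal sketch (the Server type and its string-typed input are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Server struct {
	Port    int
	Verbose bool
}

func main() {
	// WeakDecode enables WeaklyTypedInput, so "8080" is parsed into an int
	// and "true" into a bool, per the conversion list in DecoderConfig.
	input := map[string]interface{}{
		"port":    "8080",
		"verbose": "true",
	}

	var s Server
	if err := mapstructure.WeakDecode(input, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // {Port:8080 Verbose:true}
}
```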
-func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(raw interface{}) error { - return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { - if data == nil { - // If the data is nil, then we don't set anything. - return nil - } - - dataVal := reflect.ValueOf(data) - if !dataVal.IsValid() { - // If the data value is invalid, then we just set the value - // to be the zero value. - val.Set(reflect.Zero(val.Type())) - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the data. - var err error - data, err = DecodeHookExec( - d.config.DecodeHook, - dataVal.Type(), val.Type(), data) - if err != nil { - return err - } - } - - var err error - dataKind := getKind(val) - switch dataKind { - case reflect.Bool: - err = d.decodeBool(name, data, val) - case reflect.Interface: - err = d.decodeBasic(name, data, val) - case reflect.String: - err = d.decodeString(name, data, val) - case reflect.Int: - err = d.decodeInt(name, data, val) - case reflect.Uint: - err = d.decodeUint(name, data, val) - case reflect.Float32: - err = d.decodeFloat(name, data, val) - case reflect.Struct: - err = d.decodeStruct(name, data, val) - case reflect.Map: - err = d.decodeMap(name, data, val) - case reflect.Ptr: - err = d.decodePtr(name, data, val) - case reflect.Slice: - err = d.decodeSlice(name, data, val) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, dataKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metadata. - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. 
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch { - case elemKind == reflect.Uint8: - val.SetString(string(dataVal.Interface().([]uint8))) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - 
name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(float64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if dataVal.Kind() != reflect.Map { - // Accept empty array/slice instead of an empty map in weakly typed mode - if d.config.WeaklyTypedInput && - (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && - dataVal.Len() == 0 { - val.Set(valMap) - return nil - } else { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } - } - - // Accumulate errors - errors := make([]string, 0) - - for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, 
currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - valType := val.Type() - valElemType := valType.Elem() - realVal := reflect.New(valElemType) - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err - } - - val.Set(realVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - // Accept empty map instead of array/slice in weakly typed mode - if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } else { - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - } - - // Make a new slice to hold our result, same size as the original data. - valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valSlice.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - if dataValKind != reflect.Map { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) - } - - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. 
- fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) - continue - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. - squash := false - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append(structs, val.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - for fieldType, field := range fields { - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey, _ := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if strings.EqualFold(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Just ignore. - continue - } - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !field.CanSet() { - continue - } - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. 
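
The field walk above is also where embedded structs tagged with `mapstructure:",squash"` are flattened into the outer struct's field set, and where keys with no exact match fall back to a case-insensitive search via strings.EqualFold. A minimal sketch of both behaviors, mirroring the EmbeddedSquash fixture in the tests removed later in this patch:

    package main

    import (
        "fmt"

        "github.com/mitchellh/mapstructure"
    )

    type Basic struct {
        Vstring string
    }

    type EmbeddedSquash struct {
        Basic   `mapstructure:",squash"` // Basic's fields join the flat field set
        Vunique string
    }

    func main() {
        // One flat map fills both structs; "VSTRING" still reaches
        // Vstring through the case-insensitive fallback.
        input := map[string]interface{}{
            "VSTRING": "foo",
            "vunique": "bar",
        }

        var result EmbeddedSquash
        if err := mapstructure.Decode(input, &result); err != nil {
            panic(err)
        }
        fmt.Println(result.Vstring, result.Vunique) // foo bar
    }

Nested failures and metadata then carry the dot-joined names assembled just below, such as the "Vbar.Vstring" key expected by the metadata test further on.
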
- if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - } - - if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { - errors = appendErrors(errors, err) - } - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey, _ := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey, _ := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = fmt.Sprintf("%s.%s", name, key) - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - } - - return nil -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go deleted file mode 100644 index b50ac36e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go +++ /dev/null @@ -1,243 +0,0 @@ -package mapstructure - -import ( - "testing" -) - -func Benchmark_Decode(b *testing.B) { - type Person struct { - Name string - Age int - Emails []string - Extra map[string]string - } - - input := map[string]interface{}{ - "name": "Mitchell", - "age": 91, - "emails": []string{"one", "two", "three"}, - "extra": map[string]string{ - "twitter": "mitchellh", - }, - } - - var result Person - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} - -func Benchmark_DecodeBasic(b *testing.B) { - input := map[string]interface{}{ - "vstring": "foo", - "vint": 42, - "Vuint": 42, - "vbool": true, - "Vfloat": 42.42, - "vsilent": true, - "vdata": 42, - } - - var result Basic - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} - -func Benchmark_DecodeEmbedded(b *testing.B) { - input := map[string]interface{}{ - "vstring": "foo", - "Basic": map[string]interface{}{ - "vstring": "innerfoo", - }, - "vunique": "bar", - } - - var result Embedded - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} - -func Benchmark_DecodeTypeConversion(b *testing.B) { - input := map[string]interface{}{ - "IntToFloat": 42, - "IntToUint": 42, - "IntToBool": 1, - "IntToString": 42, - "UintToInt": 42, - "UintToFloat": 42, - "UintToBool": 42, - "UintToString": 42, - "BoolToInt": true, - "BoolToUint": true, - "BoolToFloat": true, - "BoolToString": true, - "FloatToInt": 42.42, - "FloatToUint": 42.42, - "FloatToBool": 42.42, - "FloatToString": 42.42, - "StringToInt": "42", - "StringToUint": "42", - "StringToBool": "1", - "StringToFloat": "42.42", - "SliceToMap": []interface{}{}, - "MapToSlice": map[string]interface{}{}, - } - - var resultStrict TypeConversionResult - for i := 0; i < b.N; i++ { - Decode(input, &resultStrict) - } -} - -func Benchmark_DecodeMap(b *testing.B) { - input 
:= map[string]interface{}{ - "vfoo": "foo", - "vother": map[interface{}]interface{}{ - "foo": "foo", - "bar": "bar", - }, - } - - var result Map - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} - -func Benchmark_DecodeMapOfStruct(b *testing.B) { - input := map[string]interface{}{ - "value": map[string]interface{}{ - "foo": map[string]string{"vstring": "one"}, - "bar": map[string]string{"vstring": "two"}, - }, - } - - var result MapOfStruct - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} - -func Benchmark_DecodeSlice(b *testing.B) { - input := map[string]interface{}{ - "vfoo": "foo", - "vbar": []string{"foo", "bar", "baz"}, - } - - var result Slice - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} - -func Benchmark_DecodeSliceOfStruct(b *testing.B) { - input := map[string]interface{}{ - "value": []map[string]interface{}{ - {"vstring": "one"}, - {"vstring": "two"}, - }, - } - - var result SliceOfStruct - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} - -func Benchmark_DecodeWeaklyTypedInput(b *testing.B) { - type Person struct { - Name string - Age int - Emails []string - } - - // This input can come from anywhere, but typically comes from - // something like decoding JSON, generated by a weakly typed language - // such as PHP. - input := map[string]interface{}{ - "name": 123, // number => string - "age": "42", // string => number - "emails": map[string]interface{}{}, // empty map => empty array - } - - var result Person - config := &DecoderConfig{ - WeaklyTypedInput: true, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - panic(err) - } - - for i := 0; i < b.N; i++ { - decoder.Decode(input) - } -} - -func Benchmark_DecodeMetadata(b *testing.B) { - type Person struct { - Name string - Age int - } - - input := map[string]interface{}{ - "name": "Mitchell", - "age": 91, - "email": "foo@bar.com", - } - - var md Metadata - var result Person - config := &DecoderConfig{ - Metadata: &md, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - panic(err) - } - - for i := 0; i < b.N; i++ { - decoder.Decode(input) - } -} - -func Benchmark_DecodeMetadataEmbedded(b *testing.B) { - input := map[string]interface{}{ - "vstring": "foo", - "vunique": "bar", - } - - var md Metadata - var result EmbeddedSquash - config := &DecoderConfig{ - Metadata: &md, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - b.Fatalf("err: %s", err) - } - - for i := 0; i < b.N; i++ { - decoder.Decode(input) - } -} - -func Benchmark_DecodeTagged(b *testing.B) { - input := map[string]interface{}{ - "foo": "bar", - "bar": "value", - } - - var result Tagged - for i := 0; i < b.N; i++ { - Decode(input, &result) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go deleted file mode 100644 index 7054f1ac..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package mapstructure - -import "testing" - -// GH-1 -func TestDecode_NilValue(t *testing.T) { - input := map[string]interface{}{ - "vfoo": nil, - "vother": nil, - } - - var result Map - err := Decode(input, &result) - if err != nil { - t.Fatalf("should not error: %s", err) - } - - if result.Vfoo != "" { - t.Fatalf("value should be 
default: %s", result.Vfoo) - } - - if result.Vother != nil { - t.Fatalf("Vother should be nil: %s", result.Vother) - } -} - -// GH-10 -func TestDecode_mapInterfaceInterface(t *testing.T) { - input := map[interface{}]interface{}{ - "vfoo": nil, - "vother": nil, - } - - var result Map - err := Decode(input, &result) - if err != nil { - t.Fatalf("should not error: %s", err) - } - - if result.Vfoo != "" { - t.Fatalf("value should be default: %s", result.Vfoo) - } - - if result.Vother != nil { - t.Fatalf("Vother should be nil: %s", result.Vother) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go deleted file mode 100644 index f17c214a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package mapstructure - -import ( - "fmt" -) - -func ExampleDecode() { - type Person struct { - Name string - Age int - Emails []string - Extra map[string]string - } - - // This input can come from anywhere, but typically comes from - // something like decoding JSON where we're not quite sure of the - // struct initially. - input := map[string]interface{}{ - "name": "Mitchell", - "age": 91, - "emails": []string{"one", "two", "three"}, - "extra": map[string]string{ - "twitter": "mitchellh", - }, - } - - var result Person - err := Decode(input, &result) - if err != nil { - panic(err) - } - - fmt.Printf("%#v", result) - // Output: - // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}} -} - -func ExampleDecode_errors() { - type Person struct { - Name string - Age int - Emails []string - Extra map[string]string - } - - // This input can come from anywhere, but typically comes from - // something like decoding JSON where we're not quite sure of the - // struct initially. - input := map[string]interface{}{ - "name": 123, - "age": "bad value", - "emails": []int{1, 2, 3}, - } - - var result Person - err := Decode(input, &result) - if err == nil { - panic("should have an error") - } - - fmt.Println(err.Error()) - // Output: - // 5 error(s) decoding: - // - // * 'Age' expected type 'int', got unconvertible type 'string' - // * 'Emails[0]' expected type 'string', got unconvertible type 'int' - // * 'Emails[1]' expected type 'string', got unconvertible type 'int' - // * 'Emails[2]' expected type 'string', got unconvertible type 'int' - // * 'Name' expected type 'string', got unconvertible type 'int' -} - -func ExampleDecode_metadata() { - type Person struct { - Name string - Age int - } - - // This input can come from anywhere, but typically comes from - // something like decoding JSON where we're not quite sure of the - // struct initially. - input := map[string]interface{}{ - "name": "Mitchell", - "age": 91, - "email": "foo@bar.com", - } - - // For metadata, we make a more advanced DecoderConfig so we can - // more finely configure the decoder that is used. In this case, we - // just tell the decoder we want to track metadata. 
- var md Metadata - var result Person - config := &DecoderConfig{ - Metadata: &md, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - panic(err) - } - - if err := decoder.Decode(input); err != nil { - panic(err) - } - - fmt.Printf("Unused keys: %#v", md.Unused) - // Output: - // Unused keys: []string{"email"} -} - -func ExampleDecode_weaklyTypedInput() { - type Person struct { - Name string - Age int - Emails []string - } - - // This input can come from anywhere, but typically comes from - // something like decoding JSON, generated by a weakly typed language - // such as PHP. - input := map[string]interface{}{ - "name": 123, // number => string - "age": "42", // string => number - "emails": map[string]interface{}{}, // empty map => empty array - } - - var result Person - config := &DecoderConfig{ - WeaklyTypedInput: true, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - panic(err) - } - - err = decoder.Decode(input) - if err != nil { - panic(err) - } - - fmt.Printf("%#v", result) - // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}} -} - -func ExampleDecode_tags() { - // Note that the mapstructure tags defined in the struct type - // can indicate which fields the values are mapped to. - type Person struct { - Name string `mapstructure:"person_name"` - Age int `mapstructure:"person_age"` - } - - input := map[string]interface{}{ - "person_name": "Mitchell", - "person_age": 91, - } - - var result Person - err := Decode(input, &result) - if err != nil { - panic(err) - } - - fmt.Printf("%#v", result) - // Output: - // mapstructure.Person{Name:"Mitchell", Age:91} -} - -func ExampleDecode_embeddedStruct() { - // Squashing multiple embedded structs is allowed using the squash tag. - // This is demonstrated by creating a composite struct of multiple types - // and decoding into it. In this case, a person can carry with it both - // a Family and a Location, as well as their own FirstName. 
- type Family struct { - LastName string - } - type Location struct { - City string - } - type Person struct { - Family `mapstructure:",squash"` - Location `mapstructure:",squash"` - FirstName string - } - - input := map[string]interface{}{ - "FirstName": "Mitchell", - "LastName": "Hashimoto", - "City": "San Francisco", - } - - var result Person - err := Decode(input, &result) - if err != nil { - panic(err) - } - - fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City) - // Output: - // Mitchell Hashimoto, San Francisco -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go deleted file mode 100644 index e05dcc66..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go +++ /dev/null @@ -1,954 +0,0 @@ -package mapstructure - -import ( - "reflect" - "sort" - "testing" -) - -type Basic struct { - Vstring string - Vint int - Vuint uint - Vbool bool - Vfloat float64 - Vextra string - vsilent bool - Vdata interface{} -} - -type Embedded struct { - Basic - Vunique string -} - -type EmbeddedPointer struct { - *Basic - Vunique string -} - -type EmbeddedSquash struct { - Basic `mapstructure:",squash"` - Vunique string -} - -type Map struct { - Vfoo string - Vother map[string]string -} - -type MapOfStruct struct { - Value map[string]Basic -} - -type Nested struct { - Vfoo string - Vbar Basic -} - -type NestedPointer struct { - Vfoo string - Vbar *Basic -} - -type Slice struct { - Vfoo string - Vbar []string -} - -type SliceOfStruct struct { - Value []Basic -} - -type Tagged struct { - Extra string `mapstructure:"bar,what,what"` - Value string `mapstructure:"foo"` -} - -type TypeConversionResult struct { - IntToFloat float32 - IntToUint uint - IntToBool bool - IntToString string - UintToInt int - UintToFloat float32 - UintToBool bool - UintToString string - BoolToInt int - BoolToUint uint - BoolToFloat float32 - BoolToString string - FloatToInt int - FloatToUint uint - FloatToBool bool - FloatToString string - SliceUint8ToString string - StringToInt int - StringToUint uint - StringToBool bool - StringToFloat float32 - SliceToMap map[string]interface{} - MapToSlice []interface{} -} - -func TestBasicTypes(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vstring": "foo", - "vint": 42, - "Vuint": 42, - "vbool": true, - "Vfloat": 42.42, - "vsilent": true, - "vdata": 42, - } - - var result Basic - err := Decode(input, &result) - if err != nil { - t.Errorf("got an err: %s", err.Error()) - t.FailNow() - } - - if result.Vstring != "foo" { - t.Errorf("vstring value should be 'foo': %#v", result.Vstring) - } - - if result.Vint != 42 { - t.Errorf("vint value should be 42: %#v", result.Vint) - } - - if result.Vuint != 42 { - t.Errorf("vuint value should be 42: %#v", result.Vuint) - } - - if result.Vbool != true { - t.Errorf("vbool value should be true: %#v", result.Vbool) - } - - if result.Vfloat != 42.42 { - t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat) - } - - if result.Vextra != "" { - t.Errorf("vextra value should be empty: %#v", result.Vextra) - } - - if result.vsilent != false { - t.Error("vsilent should not be set, it is unexported") - } - - if result.Vdata != 42 { - t.Error("vdata should be valid") - } -} - -func TestBasic_IntWithFloat(t *testing.T) { - t.Parallel() - - input := 
map[string]interface{}{ - "vint": float64(42), - } - - var result Basic - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an err: %s", err) - } -} - -func TestDecode_Embedded(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vstring": "foo", - "Basic": map[string]interface{}{ - "vstring": "innerfoo", - }, - "vunique": "bar", - } - - var result Embedded - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an err: %s", err.Error()) - } - - if result.Vstring != "innerfoo" { - t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring) - } - - if result.Vunique != "bar" { - t.Errorf("vunique value should be 'bar': %#v", result.Vunique) - } -} - -func TestDecode_EmbeddedPointer(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vstring": "foo", - "Basic": map[string]interface{}{ - "vstring": "innerfoo", - }, - "vunique": "bar", - } - - var result EmbeddedPointer - err := Decode(input, &result) - if err == nil { - t.Fatal("should get error") - } -} - -func TestDecode_EmbeddedSquash(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vstring": "foo", - "vunique": "bar", - } - - var result EmbeddedSquash - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an err: %s", err.Error()) - } - - if result.Vstring != "foo" { - t.Errorf("vstring value should be 'foo': %#v", result.Vstring) - } - - if result.Vunique != "bar" { - t.Errorf("vunique value should be 'bar': %#v", result.Vunique) - } -} - -func TestDecode_DecodeHook(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vint": "WHAT", - } - - decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) { - if from == reflect.String && to != reflect.String { - return 5, nil - } - - return v, nil - } - - var result Basic - config := &DecoderConfig{ - DecodeHook: decodeHook, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = decoder.Decode(input) - if err != nil { - t.Fatalf("got an err: %s", err) - } - - if result.Vint != 5 { - t.Errorf("vint should be 5: %#v", result.Vint) - } -} - -func TestDecode_DecodeHookType(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vint": "WHAT", - } - - decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) { - if from.Kind() == reflect.String && - to.Kind() != reflect.String { - return 5, nil - } - - return v, nil - } - - var result Basic - config := &DecoderConfig{ - DecodeHook: decodeHook, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = decoder.Decode(input) - if err != nil { - t.Fatalf("got an err: %s", err) - } - - if result.Vint != 5 { - t.Errorf("vint should be 5: %#v", result.Vint) - } -} - -func TestDecode_Nil(t *testing.T) { - t.Parallel() - - var input interface{} = nil - result := Basic{ - Vstring: "foo", - } - - err := Decode(input, &result) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result.Vstring != "foo" { - t.Fatalf("bad: %#v", result.Vstring) - } -} - -func TestDecode_NonStruct(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "foo": "bar", - "bar": "baz", - } - - var result map[string]string - err := Decode(input, &result) - if err != nil { - t.Fatalf("err: %s", err) - } - - if result["foo"] != "bar" { - t.Fatal("foo is not bar") - } -} - -func TestDecode_StructMatch(t *testing.T) { - t.Parallel() - - input := 
map[string]interface{}{ - "vbar": Basic{ - Vstring: "foo", - }, - } - - var result Nested - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an err: %s", err.Error()) - } - - if result.Vbar.Vstring != "foo" { - t.Errorf("bad: %#v", result) - } -} - -func TestDecode_TypeConversion(t *testing.T) { - input := map[string]interface{}{ - "IntToFloat": 42, - "IntToUint": 42, - "IntToBool": 1, - "IntToString": 42, - "UintToInt": 42, - "UintToFloat": 42, - "UintToBool": 42, - "UintToString": 42, - "BoolToInt": true, - "BoolToUint": true, - "BoolToFloat": true, - "BoolToString": true, - "FloatToInt": 42.42, - "FloatToUint": 42.42, - "FloatToBool": 42.42, - "FloatToString": 42.42, - "SliceUint8ToString": []uint8("foo"), - "StringToInt": "42", - "StringToUint": "42", - "StringToBool": "1", - "StringToFloat": "42.42", - "SliceToMap": []interface{}{}, - "MapToSlice": map[string]interface{}{}, - } - - expectedResultStrict := TypeConversionResult{ - IntToFloat: 42.0, - IntToUint: 42, - UintToInt: 42, - UintToFloat: 42, - BoolToInt: 0, - BoolToUint: 0, - BoolToFloat: 0, - FloatToInt: 42, - FloatToUint: 42, - } - - expectedResultWeak := TypeConversionResult{ - IntToFloat: 42.0, - IntToUint: 42, - IntToBool: true, - IntToString: "42", - UintToInt: 42, - UintToFloat: 42, - UintToBool: true, - UintToString: "42", - BoolToInt: 1, - BoolToUint: 1, - BoolToFloat: 1, - BoolToString: "1", - FloatToInt: 42, - FloatToUint: 42, - FloatToBool: true, - FloatToString: "42.42", - SliceUint8ToString: "foo", - StringToInt: 42, - StringToUint: 42, - StringToBool: true, - StringToFloat: 42.42, - SliceToMap: map[string]interface{}{}, - MapToSlice: []interface{}{}, - } - - // Test strict type conversion - var resultStrict TypeConversionResult - err := Decode(input, &resultStrict) - if err == nil { - t.Errorf("should return an error") - } - if !reflect.DeepEqual(resultStrict, expectedResultStrict) { - t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict) - } - - // Test weak type conversion - var decoder *Decoder - var resultWeak TypeConversionResult - - config := &DecoderConfig{ - WeaklyTypedInput: true, - Result: &resultWeak, - } - - decoder, err = NewDecoder(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = decoder.Decode(input) - if err != nil { - t.Fatalf("got an err: %s", err) - } - - if !reflect.DeepEqual(resultWeak, expectedResultWeak) { - t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak) - } -} - -func TestDecoder_ErrorUnused(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vstring": "hello", - "foo": "bar", - } - - var result Basic - config := &DecoderConfig{ - ErrorUnused: true, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = decoder.Decode(input) - if err == nil { - t.Fatal("expected error") - } -} - -func TestMap(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vfoo": "foo", - "vother": map[interface{}]interface{}{ - "foo": "foo", - "bar": "bar", - }, - } - - var result Map - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an error: %s", err) - } - - if result.Vfoo != "foo" { - t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) - } - - if result.Vother == nil { - t.Fatal("vother should not be nil") - } - - if len(result.Vother) != 2 { - t.Error("vother should have two items") - } - - if result.Vother["foo"] != "foo" { - t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"]) - } - - if 
result.Vother["bar"] != "bar" { - t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"]) - } -} - -func TestMapMerge(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vfoo": "foo", - "vother": map[interface{}]interface{}{ - "foo": "foo", - "bar": "bar", - }, - } - - var result Map - result.Vother = map[string]string{"hello": "world"} - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an error: %s", err) - } - - if result.Vfoo != "foo" { - t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) - } - - expected := map[string]string{ - "foo": "foo", - "bar": "bar", - "hello": "world", - } - if !reflect.DeepEqual(result.Vother, expected) { - t.Errorf("bad: %#v", result.Vother) - } -} - -func TestMapOfStruct(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "value": map[string]interface{}{ - "foo": map[string]string{"vstring": "one"}, - "bar": map[string]string{"vstring": "two"}, - }, - } - - var result MapOfStruct - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an err: %s", err) - } - - if result.Value == nil { - t.Fatal("value should not be nil") - } - - if len(result.Value) != 2 { - t.Error("value should have two items") - } - - if result.Value["foo"].Vstring != "one" { - t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring) - } - - if result.Value["bar"].Vstring != "two" { - t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring) - } -} - -func TestNestedType(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vfoo": "foo", - "vbar": map[string]interface{}{ - "vstring": "foo", - "vint": 42, - "vbool": true, - }, - } - - var result Nested - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an err: %s", err.Error()) - } - - if result.Vfoo != "foo" { - t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) - } - - if result.Vbar.Vstring != "foo" { - t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) - } - - if result.Vbar.Vint != 42 { - t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) - } - - if result.Vbar.Vbool != true { - t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) - } - - if result.Vbar.Vextra != "" { - t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) - } -} - -func TestNestedTypePointer(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vfoo": "foo", - "vbar": &map[string]interface{}{ - "vstring": "foo", - "vint": 42, - "vbool": true, - }, - } - - var result NestedPointer - err := Decode(input, &result) - if err != nil { - t.Fatalf("got an err: %s", err.Error()) - } - - if result.Vfoo != "foo" { - t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) - } - - if result.Vbar.Vstring != "foo" { - t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) - } - - if result.Vbar.Vint != 42 { - t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) - } - - if result.Vbar.Vbool != true { - t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) - } - - if result.Vbar.Vextra != "" { - t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) - } -} - -func TestSlice(t *testing.T) { - t.Parallel() - - inputStringSlice := map[string]interface{}{ - "vfoo": "foo", - "vbar": []string{"foo", "bar", "baz"}, - } - - inputStringSlicePointer := map[string]interface{}{ - "vfoo": "foo", - "vbar": &[]string{"foo", "bar", "baz"}, - } - - outputStringSlice := &Slice{ - "foo", - []string{"foo", "bar", "baz"}, - } - - testSliceInput(t, 
inputStringSlice, outputStringSlice) - testSliceInput(t, inputStringSlicePointer, outputStringSlice) -} - -func TestInvalidSlice(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vfoo": "foo", - "vbar": 42, - } - - result := Slice{} - err := Decode(input, &result) - if err == nil { - t.Errorf("expected failure") - } -} - -func TestSliceOfStruct(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "value": []map[string]interface{}{ - {"vstring": "one"}, - {"vstring": "two"}, - }, - } - - var result SliceOfStruct - err := Decode(input, &result) - if err != nil { - t.Fatalf("got unexpected error: %s", err) - } - - if len(result.Value) != 2 { - t.Fatalf("expected two values, got %d", len(result.Value)) - } - - if result.Value[0].Vstring != "one" { - t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) - } - - if result.Value[1].Vstring != "two" { - t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) - } -} - -func TestInvalidType(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vstring": 42, - } - - var result Basic - err := Decode(input, &result) - if err == nil { - t.Fatal("error should exist") - } - - derr, ok := err.(*Error) - if !ok { - t.Fatalf("error should be kind of Error, instead: %#v", err) - } - - if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" { - t.Errorf("got unexpected error: %s", err) - } - - inputNegIntUint := map[string]interface{}{ - "vuint": -42, - } - - err = Decode(inputNegIntUint, &result) - if err == nil { - t.Fatal("error should exist") - } - - derr, ok = err.(*Error) - if !ok { - t.Fatalf("error should be kind of Error, instead: %#v", err) - } - - if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" { - t.Errorf("got unexpected error: %s", err) - } - - inputNegFloatUint := map[string]interface{}{ - "vuint": -42.0, - } - - err = Decode(inputNegFloatUint, &result) - if err == nil { - t.Fatal("error should exist") - } - - derr, ok = err.(*Error) - if !ok { - t.Fatalf("error should be kind of Error, instead: %#v", err) - } - - if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" { - t.Errorf("got unexpected error: %s", err) - } -} - -func TestMetadata(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vfoo": "foo", - "vbar": map[string]interface{}{ - "vstring": "foo", - "Vuint": 42, - "foo": "bar", - }, - "bar": "nil", - } - - var md Metadata - var result Nested - config := &DecoderConfig{ - Metadata: &md, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = decoder.Decode(input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} - sort.Strings(md.Keys) - if !reflect.DeepEqual(md.Keys, expectedKeys) { - t.Fatalf("bad keys: %#v", md.Keys) - } - - expectedUnused := []string{"Vbar.foo", "bar"} - if !reflect.DeepEqual(md.Unused, expectedUnused) { - t.Fatalf("bad unused: %#v", md.Unused) - } -} - -func TestMetadata_Embedded(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "vstring": "foo", - "vunique": "bar", - } - - var md Metadata - var result EmbeddedSquash - config := &DecoderConfig{ - Metadata: &md, - Result: &result, - } - - decoder, err := NewDecoder(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = decoder.Decode(input) - if err != nil { - t.Fatalf("err: %s", err.Error()) - } - - expectedKeys := 
[]string{"Vstring", "Vunique"} - - sort.Strings(md.Keys) - if !reflect.DeepEqual(md.Keys, expectedKeys) { - t.Fatalf("bad keys: %#v", md.Keys) - } - - expectedUnused := []string{} - if !reflect.DeepEqual(md.Unused, expectedUnused) { - t.Fatalf("bad unused: %#v", md.Unused) - } -} - -func TestNonPtrValue(t *testing.T) { - t.Parallel() - - err := Decode(map[string]interface{}{}, Basic{}) - if err == nil { - t.Fatal("error should exist") - } - - if err.Error() != "result must be a pointer" { - t.Errorf("got unexpected error: %s", err) - } -} - -func TestTagged(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "foo": "bar", - "bar": "value", - } - - var result Tagged - err := Decode(input, &result) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if result.Value != "bar" { - t.Errorf("value should be 'bar', got: %#v", result.Value) - } - - if result.Extra != "value" { - t.Errorf("extra should be 'value', got: %#v", result.Extra) - } -} - -func TestWeakDecode(t *testing.T) { - t.Parallel() - - input := map[string]interface{}{ - "foo": "4", - "bar": "value", - } - - var result struct { - Foo int - Bar string - } - - if err := WeakDecode(input, &result); err != nil { - t.Fatalf("err: %s", err) - } - if result.Foo != 4 { - t.Fatalf("bad: %#v", result) - } - if result.Bar != "value" { - t.Fatalf("bad: %#v", result) - } -} - -func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) { - var result Slice - err := Decode(input, &result) - if err != nil { - t.Fatalf("got error: %s", err) - } - - if result.Vfoo != expected.Vfoo { - t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) - } - - if result.Vbar == nil { - t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) - } - - if len(result.Vbar) != len(expected.Vbar) { - t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) - } - - for i, v := range result.Vbar { - if v != expected.Vbar[i] { - t.Errorf( - "Vbar[%d] should be '%#v', got '%#v'", - i, expected.Vbar[i], v) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.gitignore deleted file mode 100644 index 5cdbab79..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*~ -*.pyc -test-env* -junk/ \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.travis.yml deleted file mode 100644 index 391b1d19..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - 1.1.2 - - 1.2.2 - - 1.3 - - tip - -script: - - go test diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/COPYING b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/COPYING deleted file mode 100644 index 8c27c67f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/COPYING +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the 
Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/README.md
deleted file mode 100644
index 993560ea..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-Swift
-=====
-
-This package provides an easy-to-use library for interfacing with
-Swift / Openstack Object Storage / Rackspace cloud files from the Go
-language.
-
-See here for package docs
-
-    http://godoc.org/github.com/ncw/swift
-
-[![Build Status](https://travis-ci.org/ncw/swift.png)](https://travis-ci.org/ncw/swift)
-
-Install
--------
-
-Use go to install the library
-
-    go get github.com/ncw/swift
-
-Usage
------
-
-See here for full package docs
-
-- http://godoc.org/github.com/ncw/swift
-
-Here is a short example from the docs
-
-    import "github.com/ncw/swift"
-
-    // Create a connection
-    c := swift.Connection{
-        UserName: "user",
-        ApiKey:   "key",
-        AuthUrl:  "auth_url",
-        Domain:   "domain", // Name of the domain (v3 auth only)
-        Tenant:   "tenant", // Name of the tenant (v2 auth only)
-    }
-    // Authenticate
-    err := c.Authenticate()
-    if err != nil {
-        panic(err)
-    }
-    // List all the containers
-    containers, err := c.ContainerNames(nil)
-    fmt.Println(containers)
-    // etc...
-
-Additions
----------
-
-The `rs` sub-project contains a wrapper for the Rackspace-specific CDN Management interface.
-
-Testing
--------
-
-To run the tests you can use an embedded fake Swift server, a real
-Openstack Swift server, or a Rackspace Cloud files account.
- -When using a real Swift server, you need to set these environment variables -before running the tests - - export SWIFT_API_USER='user' - export SWIFT_API_KEY='key' - export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' - -And optionally these if using v2 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - -And optionally these if using v3 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - export SWIFT_API_DOMAIN_ID='domain id' - export SWIFT_API_DOMAIN='domain name' - -And optionally this if you want to skip server certificate validation - - export SWIFT_AUTH_INSECURE=1 - -And optionally this to configure the connect channel timeout, in seconds - - export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 - -And optionally this to configure the data channel timeout, in seconds - - export SWIFT_DATA_CHANNEL_TIMEOUT=60 - -Then run the tests with `go test` - -License -------- - -This is free software under the terms of MIT license (check COPYING file -included in this package). - -Contact and support -------------------- - -The project website is at: - -- https://github.com/ncw/swift - -There you can file bug reports, ask for help or contribute patches. - -Authors -------- - -- Nick Craig-Wood - -Contributors ------------- - -- Brian "bojo" Jones -- Janika Liiv -- Yamamoto, Hirotaka -- Stephen -- platformpurple -- Paul Querna -- Livio Soares -- thesyncim -- lsowen -- Sylvain Baubeau -- Chris Kastorff -- Dai HaoJun diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth.go deleted file mode 100644 index ca35d237..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth.go +++ /dev/null @@ -1,283 +0,0 @@ -package swift - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "strings" -) - -// Auth defines the operations needed to authenticate with swift -// -// This encapsulates the different authentication schemes in use -type Authenticator interface { - Request(*Connection) (*http.Request, error) - Response(resp *http.Response) error - // The public storage URL - set Internal to true to read - // internal/service net URL - StorageUrl(Internal bool) string - // The access token - Token() string - // The CDN url if available - CdnUrl() string -} - -// newAuth - create a new Authenticator from the AuthUrl -// -// A hint for AuthVersion can be provided -func newAuth(c *Connection) (Authenticator, error) { - AuthVersion := c.AuthVersion - if AuthVersion == 0 { - if strings.Contains(c.AuthUrl, "v3") { - AuthVersion = 3 - } else if strings.Contains(c.AuthUrl, "v2") { - AuthVersion = 2 - } else if strings.Contains(c.AuthUrl, "v1") { - AuthVersion = 1 - } else { - return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly") - } - } - switch AuthVersion { - case 1: - return &v1Auth{}, nil - case 2: - return &v2Auth{ - // Guess as to whether using API key or - // password it will try both eventually so - // this is just an optimization. 
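- // The length check below guesses that a secret of 32 or more
- // characters is an API key; anything shorter starts out being
- // treated as a password. Request toggles the guess on each
- // failed attempt until Response confirms one variant works.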
- useApiKey: len(c.ApiKey) >= 32, - }, nil - case 3: - return &v3Auth{}, nil - } - return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion) -} - -// ------------------------------------------------------------ - -// v1 auth -type v1Auth struct { - Headers http.Header // V1 auth: the authentication headers so extensions can access them -} - -// v1 Authentication - make request -func (auth *v1Auth) Request(c *Connection) (*http.Request, error) { - req, err := http.NewRequest("GET", c.AuthUrl, nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", c.UserAgent) - req.Header.Set("X-Auth-Key", c.ApiKey) - req.Header.Set("X-Auth-User", c.UserName) - return req, nil -} - -// v1 Authentication - read response -func (auth *v1Auth) Response(resp *http.Response) error { - auth.Headers = resp.Header - return nil -} - -// v1 Authentication - read storage url -func (auth *v1Auth) StorageUrl(Internal bool) string { - storageUrl := auth.Headers.Get("X-Storage-Url") - if Internal { - newUrl, err := url.Parse(storageUrl) - if err != nil { - return storageUrl - } - newUrl.Host = "snet-" + newUrl.Host - storageUrl = newUrl.String() - } - return storageUrl -} - -// v1 Authentication - read auth token -func (auth *v1Auth) Token() string { - return auth.Headers.Get("X-Auth-Token") -} - -// v1 Authentication - read cdn url -func (auth *v1Auth) CdnUrl() string { - return auth.Headers.Get("X-CDN-Management-Url") -} - -// ------------------------------------------------------------ - -// v2 Authentication -type v2Auth struct { - Auth *v2AuthResponse - Region string - useApiKey bool // if set will use API key not Password - useApiKeyOk bool // if set won't change useApiKey any more - notFirst bool // set after first run -} - -// v2 Authentication - make request -func (auth *v2Auth) Request(c *Connection) (*http.Request, error) { - auth.Region = c.Region - // Toggle useApiKey if not first run and not OK yet - if auth.notFirst && !auth.useApiKeyOk { - auth.useApiKey = !auth.useApiKey - } - auth.notFirst = true - // Create a V2 auth request for the body of the connection - var v2i interface{} - if !auth.useApiKey { - // Normal swift authentication - v2 := v2AuthRequest{} - v2.Auth.PasswordCredentials.UserName = c.UserName - v2.Auth.PasswordCredentials.Password = c.ApiKey - v2.Auth.Tenant = c.Tenant - v2.Auth.TenantId = c.TenantId - v2i = v2 - } else { - // Rackspace special with API Key - v2 := v2AuthRequestRackspace{} - v2.Auth.ApiKeyCredentials.UserName = c.UserName - v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey - v2.Auth.Tenant = c.Tenant - v2.Auth.TenantId = c.TenantId - v2i = v2 - } - body, err := json.Marshal(v2i) - if err != nil { - return nil, err - } - url := c.AuthUrl - if !strings.HasSuffix(url, "/") { - url += "/" - } - url += "tokens" - req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - return req, nil -} - -// v2 Authentication - read response -func (auth *v2Auth) Response(resp *http.Response) error { - auth.Auth = new(v2AuthResponse) - err := readJson(resp, auth.Auth) - // If successfully read Auth then no need to toggle useApiKey any more - if err == nil { - auth.useApiKeyOk = true - } - return err -} - -// Finds the Endpoint Url of "type" from the v2AuthResponse using the -// Region if set or defaulting to the first one if not -// -// Returns "" if not found -func (auth *v2Auth) endpointUrl(Type string, Internal bool) string { - for _, catalog := range 
auth.Auth.Access.ServiceCatalog { - if catalog.Type == Type { - for _, endpoint := range catalog.Endpoints { - if auth.Region == "" || (auth.Region == endpoint.Region) { - if Internal { - return endpoint.InternalUrl - } else { - return endpoint.PublicUrl - } - } - } - } - } - return "" -} - -// v2 Authentication - read storage url -// -// If Internal is true then it reads the private (internal / service -// net) URL. -func (auth *v2Auth) StorageUrl(Internal bool) string { - return auth.endpointUrl("object-store", Internal) -} - -// v2 Authentication - read auth token -func (auth *v2Auth) Token() string { - return auth.Auth.Access.Token.Id -} - -// v2 Authentication - read cdn url -func (auth *v2Auth) CdnUrl() string { - return auth.endpointUrl("rax:object-cdn", false) -} - -// ------------------------------------------------------------ - -// V2 Authentication request -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthRequest struct { - Auth struct { - PasswordCredentials struct { - UserName string `json:"username"` - Password string `json:"password"` - } `json:"passwordCredentials"` - Tenant string `json:"tenantName,omitempty"` - TenantId string `json:"tenantId,omitempty"` - } `json:"auth"` -} - -// V2 Authentication request - Rackspace variant -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthRequestRackspace struct { - Auth struct { - ApiKeyCredentials struct { - UserName string `json:"username"` - ApiKey string `json:"apiKey"` - } `json:"RAX-KSKEY:apiKeyCredentials"` - Tenant string `json:"tenantName,omitempty"` - TenantId string `json:"tenantId,omitempty"` - } `json:"auth"` -} - -// V2 Authentication reply -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthResponse struct { - Access struct { - ServiceCatalog []struct { - Endpoints []struct { - InternalUrl string - PublicUrl string - Region string - TenantId string - } - Name string - Type string - } - Token struct { - Expires string - Id string - Tenant struct { - Id string - Name string - } - } - User struct { - DefaultRegion string `json:"RAX-AUTH:defaultRegion"` - Id string - Name string - Roles []struct { - Description string - Id string - Name string - TenantId string - } - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth_v3.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth_v3.go deleted file mode 100644 index efcb77e5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/auth_v3.go +++ /dev/null @@ -1,207 +0,0 @@ -package swift - -import ( - "bytes" - "encoding/json" - "net/http" - "strings" -) - -const ( - v3AuthMethodToken = "token" - v3AuthMethodPassword = "password" - v3InterfacePublic = "public" - v3InterfaceInternal = "internal" - v3InterfaceAdmin = "admin" - v3CatalogTypeObjectStore = 
"object-store" -) - -// V3 Authentication request -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://developer.openstack.org/api-ref-identity-v3.html -type v3AuthRequest struct { - Auth struct { - Identity struct { - Methods []string `json:"methods"` - Password *v3AuthPassword `json:"password,omitempty"` - Token *v3AuthToken `json:"token,omitempty"` - } `json:"identity"` - Scope *v3Scope `json:"scope,omitempty"` - } `json:"auth"` -} - -type v3Scope struct { - Project *v3Project `json:"project,omitempty"` - Domain *v3Domain `json:"domain,omitempty"` -} - -type v3Domain struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type v3Project struct { - Name string `json:"name,omitempty"` - Id string `json:"id,omitempty"` - Domain *v3Domain `json:"domain,omitempty"` -} - -type v3User struct { - Domain *v3Domain `json:"domain,omitempty"` - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Password string `json:"password,omitempty"` -} - -type v3AuthToken struct { - Id string `json:"id"` -} - -type v3AuthPassword struct { - User v3User `json:"user"` -} - -// V3 Authentication response -type v3AuthResponse struct { - Token struct { - Expires_At, Issued_At string - Methods []string - Roles []map[string]string - - Project struct { - Domain struct { - Id, Name string - } - Id, Name string - } - - Catalog []struct { - Id, Namem, Type string - Endpoints []struct { - Id, Region_Id, Url, Region, Interface string - } - } - - User struct { - Id, Name string - Domain struct { - Id, Name string - Links struct { - Self string - } - } - } - - Audit_Ids []string - } -} - -type v3Auth struct { - Auth *v3AuthResponse - Headers http.Header -} - -func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { - - var v3i interface{} - - v3 := v3AuthRequest{} - - if c.UserName == "" { - v3.Auth.Identity.Methods = []string{v3AuthMethodToken} - v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} - } else { - v3.Auth.Identity.Methods = []string{v3AuthMethodPassword} - v3.Auth.Identity.Password = &v3AuthPassword{ - User: v3User{ - Name: c.UserName, - Password: c.ApiKey, - }, - } - - var domain *v3Domain - - if c.Domain != "" { - domain = &v3Domain{Name: c.Domain} - } else if c.DomainId != "" { - domain = &v3Domain{Id: c.DomainId} - } - v3.Auth.Identity.Password.User.Domain = domain - } - - if c.TenantId != "" || c.Tenant != "" { - - v3.Auth.Scope = &v3Scope{Project: &v3Project{}} - - if c.TenantId != "" { - v3.Auth.Scope.Project.Id = c.TenantId - } else if c.Tenant != "" { - v3.Auth.Scope.Project.Name = c.Tenant - var defaultDomain v3Domain - if c.Domain != "" { - defaultDomain = v3Domain{Name: "Default"} - } else if c.DomainId != "" { - defaultDomain = v3Domain{Id: "Default"} - } - v3.Auth.Scope.Project.Domain = &defaultDomain - } - } - - v3i = v3 - - body, err := json.Marshal(v3i) - - if err != nil { - return nil, err - } - - url := c.AuthUrl - if !strings.HasSuffix(url, "/") { - url += "/" - } - url += "tokens" - req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - return req, nil -} - -func (auth *v3Auth) Response(resp *http.Response) error { - auth.Auth = &v3AuthResponse{} - auth.Headers = resp.Header - err := readJson(resp, auth.Auth) - return err -} - -func (auth *v3Auth) endpointUrl(Type string, Internal bool) string { - for _, catalog := range auth.Auth.Token.Catalog { - if catalog.Type == Type { - for _, endpoint := 
range catalog.Endpoints { - if Internal { - if endpoint.Interface == v3InterfaceInternal { - return endpoint.Url - } - } else { - if endpoint.Interface == v3InterfacePublic { - return endpoint.Url - } - } - } - } - } - return "" -} - -func (auth *v3Auth) StorageUrl(Internal bool) string { - return auth.endpointUrl(v3CatalogTypeObjectStore, Internal) -} - -func (auth *v3Auth) Token() string { - return auth.Headers.Get("X-Subject-Token") -} - -func (auth *v3Auth) CdnUrl() string { - return "" -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_0.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_0.go deleted file mode 100644 index 7b69a757..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_0.go +++ /dev/null @@ -1,28 +0,0 @@ -// Go 1.0 compatibility functions - -// +build !go1.1 - -package swift - -import ( - "log" - "net/http" - "time" -) - -// Cancel the request - doesn't work under < go 1.1 -func cancelRequest(transport http.RoundTripper, req *http.Request) { - log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1") -} - -// Reset a timer - Doesn't work properly < go 1.1 -// -// This is quite hard to do properly under go < 1.1 so we do a crude -// approximation and hope that everyone upgrades to go 1.1 quickly -func resetTimer(t *time.Timer, d time.Duration) { - t.Stop() - // Very likely this doesn't actually work if we are already - // selecting on t.C. However we've stopped the original timer - // so won't break transfers but may not time them out :-( - *t = *time.NewTimer(d) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_1.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_1.go deleted file mode 100644 index a4f9c3ab..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_1.go +++ /dev/null @@ -1,24 +0,0 @@ -// Go 1.1 and later compatibility functions -// -// +build go1.1 - -package swift - -import ( - "net/http" - "time" -) - -// Cancel the request -func cancelRequest(transport http.RoundTripper, req *http.Request) { - if tr, ok := transport.(interface { - CancelRequest(*http.Request) - }); ok { - tr.CancelRequest(req) - } -} - -// Reset a timer -func resetTimer(t *time.Timer, d time.Duration) { - t.Reset(d) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/doc.go deleted file mode 100644 index 44efde7b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files - -Standard Usage - -Most of the work is done through the Container*() and Object*() methods. - -All methods are safe to use concurrently in multiple go routines. - -Object Versioning - -As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. 
The container and objects inside it can be used in the standard manner; however, pushing a file multiple times will result in it being copied to the version container and the new file put in its place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system. - -Rackspace Sub Module - -This module specifically allows the enabling/disabling of Rackspace Cloud Files CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack; therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects. - -*/ -package swift diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/example_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/example_test.go deleted file mode 100644 index 30cb9834..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/example_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright... - -// This example demonstrates opening a Connection and doing some basic operations. -package swift_test - -import ( - "fmt" - - "github.com/ncw/swift" -) - -func ExampleConnection() { - // Create a v1 auth connection - c := swift.Connection{ - // This should be your username - UserName: "user", - // This should be your api key - ApiKey: "key", - // This should be a v1 auth url, eg - // Rackspace US https://auth.api.rackspacecloud.com/v1.0 - // Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 - // Memset Memstore UK https://auth.storage.memset.com/v1.0 - AuthUrl: "auth_url", - } - - // Authenticate - err := c.Authenticate() - if err != nil { - panic(err) - } - // List all the containers - containers, err := c.ContainerNames(nil) - fmt.Println(containers) - // etc... - - // ------ or alternatively create a v2 connection ------ - - // Create a v2 auth connection - c = swift.Connection{ - // This is the sub user for the storage - eg "admin" - UserName: "user", - // This should be your api key - ApiKey: "key", - // This should be a v2 auth url, eg - // Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 - // Memset Memstore v2 https://auth.storage.memset.com/v2.0 - AuthUrl: "v2_auth_url", - // Region to use - default is use first region if unset - Region: "LON", - // Name of the tenant - this is likely your username - Tenant: "jim", - } - - // as above... -} - -var container string - -func ExampleConnection_ObjectsWalk() { - objects := make([]string, 0) - err := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) { - newObjects, err := c.ObjectNames(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - fmt.Println("Found all the objects", objects, err) -} - -func ExampleConnection_VersionContainerCreate() { - // Use the helper method to create the current and versions container. - if err := c.VersionContainerCreate("cds", "cd-versions"); err != nil { - fmt.Print(err.Error()) - } -} - -func ExampleConnection_VersionEnable() { - // Build the containers manually and enable them. 
- if err := c.ContainerCreate("movie-versions", nil); err != nil { - fmt.Print(err.Error()) - } - if err := c.ContainerCreate("movies", nil); err != nil { - fmt.Print(err.Error()) - } - if err := c.VersionEnable("movies", "movie-versions"); err != nil { - fmt.Print(err.Error()) - } - - // Access the primary container as usual with ObjectCreate(), ObjectPut(), etc. - // etc... -} - -func ExampleConnection_VersionDisable() { - // Disable versioning on a container. Note that this does not delete the versioning container. - c.VersionDisable("movies") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta.go deleted file mode 100644 index e52d6860..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta.go +++ /dev/null @@ -1,174 +0,0 @@ -// Metadata manipulation in and out of Headers - -package swift - -import ( - "fmt" - "net/http" - "strconv" - "strings" - "time" -) - -// Metadata stores account, container or object metadata. -type Metadata map[string]string - -// Metadata gets the Metadata starting with the metaPrefix out of the Headers. -// -// The keys in the Metadata will be converted to lower case -func (h Headers) Metadata(metaPrefix string) Metadata { - m := Metadata{} - metaPrefix = http.CanonicalHeaderKey(metaPrefix) - for key, value := range h { - if strings.HasPrefix(key, metaPrefix) { - metaKey := strings.ToLower(key[len(metaPrefix):]) - m[metaKey] = value - } - } - return m -} - -// AccountMetadata converts Headers from account to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) AccountMetadata() Metadata { - return h.Metadata("X-Account-Meta-") -} - -// ContainerMetadata converts Headers from container to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) ContainerMetadata() Metadata { - return h.Metadata("X-Container-Meta-") -} - -// ObjectMetadata converts Headers from object to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) ObjectMetadata() Metadata { - return h.Metadata("X-Object-Meta-") -} - -// Headers convert the Metadata starting with the metaPrefix into a -// Headers. -// -// The keys in the Metadata will be converted from lower case to http -// Canonical (see http.CanonicalHeaderKey). -func (m Metadata) Headers(metaPrefix string) Headers { - h := Headers{} - for key, value := range m { - key = http.CanonicalHeaderKey(metaPrefix + key) - h[key] = value - } - return h -} - -// AccountHeaders converts the Metadata for the account. -func (m Metadata) AccountHeaders() Headers { - return m.Headers("X-Account-Meta-") -} - -// ContainerHeaders converts the Metadata for the container. -func (m Metadata) ContainerHeaders() Headers { - return m.Headers("X-Container-Meta-") -} - -// ObjectHeaders converts the Metadata for the object. -func (m Metadata) ObjectHeaders() Headers { - return m.Headers("X-Object-Meta-") -} - -// Turns a number of ns into a floating point string in seconds -// -// Trims trailing zeros and guaranteed to be perfectly accurate -func nsToFloatString(ns int64) string { - if ns < 0 { - return "-" + nsToFloatString(-ns) - } - result := fmt.Sprintf("%010d", ns) - split := len(result) - 9 - result, decimals := result[:split], result[split:] - decimals = strings.TrimRight(decimals, "0") - if decimals != "" { - result += "." 
- result += decimals - } - return result -} - -// Turns a floating point string in seconds into a ns integer -// -// Guaranteed to be perfectly accurate -func floatStringToNs(s string) (int64, error) { - const zeros = "000000000" - if point := strings.IndexRune(s, '.'); point >= 0 { - tail := s[point+1:] - if fill := 9 - len(tail); fill < 0 { - tail = tail[:9] - } else { - tail += zeros[:fill] - } - s = s[:point] + tail - } else if len(s) > 0 { // Make sure empty string produces an error - s += zeros - } - return strconv.ParseInt(s, 10, 64) -} - -// FloatStringToTime converts a floating point number string to a time.Time -// -// The string is a floating point number of seconds since the epoch -// (Unix time). The number should be in fixed point format (not -// exponential), eg "1354040105.123456789" which represents the time -// "2012-11-27T18:15:05.123456789Z" -// -// Some care is taken to preserve all the accuracy in the time.Time -// (which wouldn't happen with a naive conversion through float64) so -// a round trip conversion won't change the data. -// -// If an error is returned then time will be returned as the zero time. -func FloatStringToTime(s string) (t time.Time, err error) { - ns, err := floatStringToNs(s) - if err != nil { - return - } - t = time.Unix(0, ns) - return -} - -// TimeToFloatString converts a time.Time object to a floating point string -// -// The string is a floating point number of seconds since the epoch -// (Unix time). The number is in fixed point format (not -// exponential), eg "1354040105.123456789" which represents the time -// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped -// from the output. -// -// Some care is taken to preserve all the accuracy in the time.Time -// (which wouldn't happen with a naive conversion through float64) so -// a round trip conversion won't change the data. -func TimeToFloatString(t time.Time) string { - return nsToFloatString(t.UnixNano()) -} - -// Read a modification time (mtime) from a Metadata object -// -// This is a de facto standard (used in the official python-swiftclient -// amongst others) for storing the modification time (as read using -// os.Stat) for an object. It is stored using the key 'mtime', which -// for example when written to an object will be 'X-Object-Meta-Mtime'. -// -// If an error is returned then time will be returned as the zero time. -func (m Metadata) GetModTime() (t time.Time, err error) { - return FloatStringToTime(m["mtime"]) -} - -// Write a modification time (mtime) to a Metadata object -// -// This is a de facto standard (used in the official python-swiftclient -// amongst others) for storing the modification time (as read using -// os.Stat) for an object. It is stored using the key 'mtime', which -// for example when written to an object will be 'X-Object-Meta-Mtime'. 
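A short sketch of the round trip this enables from the caller's side (values are illustrative):

	m := swift.Metadata{}
	m.SetModTime(time.Now())  // stored as m["mtime"], a fixed point string
	h := m.ObjectHeaders()    // becomes the "X-Object-Meta-Mtime" header
	// ...send h with an upload, read the headers back later...
	mtime, err := h.ObjectMetadata().GetModTime()
	fmt.Println(mtime, err)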
-func (m Metadata) SetModTime(t time.Time) { - m["mtime"] = TimeToFloatString(t) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go deleted file mode 100644 index 47560d57..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go +++ /dev/null @@ -1,213 +0,0 @@ -// Tests for swift metadata -package swift - -import ( - "testing" - "time" -) - -func TestHeadersToMetadata(t *testing.T) { -} - -func TestHeadersToAccountMetadata(t *testing.T) { -} - -func TestHeadersToContainerMetadata(t *testing.T) { -} - -func TestHeadersToObjectMetadata(t *testing.T) { -} - -func TestMetadataToHeaders(t *testing.T) { -} - -func TestMetadataToAccountHeaders(t *testing.T) { -} - -func TestMetadataToContainerHeaders(t *testing.T) { -} - -func TestMetadataToObjectHeaders(t *testing.T) { -} - -func TestNsToFloatString(t *testing.T) { - for _, d := range []struct { - ns int64 - fs string - }{ - {0, "0"}, - {1, "0.000000001"}, - {1000, "0.000001"}, - {1000000, "0.001"}, - {100000000, "0.1"}, - {1000000000, "1"}, - {10000000000, "10"}, - {12345678912, "12.345678912"}, - {12345678910, "12.34567891"}, - {12345678900, "12.3456789"}, - {12345678000, "12.345678"}, - {12345670000, "12.34567"}, - {12345600000, "12.3456"}, - {12345000000, "12.345"}, - {12340000000, "12.34"}, - {12300000000, "12.3"}, - {12000000000, "12"}, - {10000000000, "10"}, - {1347717491123123123, "1347717491.123123123"}, - } { - if nsToFloatString(d.ns) != d.fs { - t.Error("Failed", d.ns, "!=", d.fs) - } - if d.ns > 0 && nsToFloatString(-d.ns) != "-"+d.fs { - t.Error("Failed on negative", d.ns, "!=", d.fs) - } - } -} - -func TestFloatStringToNs(t *testing.T) { - for _, d := range []struct { - ns int64 - fs string - }{ - {0, "0"}, - {0, "0."}, - {0, ".0"}, - {0, "0.0"}, - {0, "0.0000000001"}, - {1, "0.000000001"}, - {1000, "0.000001"}, - {1000000, "0.001"}, - {100000000, "0.1"}, - {100000000, "0.10"}, - {100000000, "0.1000000001"}, - {1000000000, "1"}, - {1000000000, "1."}, - {1000000000, "1.0"}, - {10000000000, "10"}, - {12345678912, "12.345678912"}, - {12345678912, "12.3456789129"}, - {12345678912, "12.34567891299"}, - {12345678910, "12.34567891"}, - {12345678900, "12.3456789"}, - {12345678000, "12.345678"}, - {12345670000, "12.34567"}, - {12345600000, "12.3456"}, - {12345000000, "12.345"}, - {12340000000, "12.34"}, - {12300000000, "12.3"}, - {12000000000, "12"}, - {10000000000, "10"}, - // This is a typical value which has more bits in than a float64 - {1347717491123123123, "1347717491.123123123"}, - } { - ns, err := floatStringToNs(d.fs) - if err != nil { - t.Error("Failed conversion", err) - } - if ns != d.ns { - t.Error("Failed", d.fs, "!=", d.ns, "was", ns) - } - if d.ns > 0 { - ns, err := floatStringToNs("-" + d.fs) - if err != nil { - t.Error("Failed conversion", err) - } - if ns != -d.ns { - t.Error("Failed on negative", -d.ns, "!=", "-"+d.fs) - } - } - } - - // These are expected to produce errors - for _, fs := range []string{ - "", - " 1", - "- 1", - "- 1", - "1.-1", - "1.0.0", - "1x0", - } { - ns, err := floatStringToNs(fs) - if err == nil { - t.Error("Didn't produce expected error", fs, ns) - } - } - -} - -func TestGetModTime(t *testing.T) { - for _, d := range []struct { - ns string - t string - }{ - {"1354040105", "2012-11-27T18:15:05Z"}, - {"1354040105.", "2012-11-27T18:15:05Z"}, - {"1354040105.0", "2012-11-27T18:15:05Z"}, - 
{"1354040105.000000000000", "2012-11-27T18:15:05Z"}, - {"1354040105.123", "2012-11-27T18:15:05.123Z"}, - {"1354040105.123456", "2012-11-27T18:15:05.123456Z"}, - {"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"}, - {"1354040105.123456789123", "2012-11-27T18:15:05.123456789Z"}, - {"0", "1970-01-01T00:00:00.000000000Z"}, - } { - expected, err := time.Parse(time.RFC3339, d.t) - if err != nil { - t.Error("Bad test", err) - } - m := Metadata{"mtime": d.ns} - actual, err := m.GetModTime() - if err != nil { - t.Error("Parse error", err) - } - if !actual.Equal(expected) { - t.Error("Expecting", expected, expected.UnixNano(), "got", actual, actual.UnixNano()) - } - } - for _, ns := range []string{ - "EMPTY", - "", - " 1", - "- 1", - "- 1", - "1.-1", - "1.0.0", - "1x0", - } { - m := Metadata{} - if ns != "EMPTY" { - m["mtime"] = ns - } - actual, err := m.GetModTime() - if err == nil { - t.Error("Expected error not produced") - } - if !actual.IsZero() { - t.Error("Expected output to be zero") - } - } -} - -func TestSetModTime(t *testing.T) { - for _, d := range []struct { - ns string - t string - }{ - {"1354040105", "2012-11-27T18:15:05Z"}, - {"1354040105", "2012-11-27T18:15:05.000000Z"}, - {"1354040105.123", "2012-11-27T18:15:05.123Z"}, - {"1354040105.123456", "2012-11-27T18:15:05.123456Z"}, - {"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"}, - {"0", "1970-01-01T00:00:00.000000000Z"}, - } { - time, err := time.Parse(time.RFC3339, d.t) - if err != nil { - t.Error("Bad test", err) - } - m := Metadata{} - m.SetModTime(time) - if m["mtime"] != d.ns { - t.Error("mtime wrong", m, "should be", d.ns) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/notes.txt b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/notes.txt deleted file mode 100644 index f738552c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/notes.txt +++ /dev/null @@ -1,55 +0,0 @@ -Notes on Go Swift -================= - -Make a builder style interface like the Google Go APIs? Advantages -are that it is easy to add named methods to the service object to do -specific things. Slightly less efficient. Not sure about how to -return extra stuff though - in an object? - -Make a container struct so these could be methods on it? - -Make noResponse check for 204? - -Make storage public so it can be extended easily? - -Rename to go-swift to match user agent string? - -Reconnect on auth error - 401 when token expires isn't tested - -Make more api compatible with python cloudfiles? - -Retry operations on timeout / network errors? -- also 408 error -- GET requests only? - -Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock - -Add extra headers field to Connection (for via etc) - -Make errors use an error heirachy then can catch them with a type assertion - - Error(...) - ObjectCorrupted{ Error } - -Make a Debug flag in connection for logging stuff - -Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc - -Object range - -Object create, update with X-Delete-At or X-Delete-After - -Large object support -- check uploads are less than 5GB in normal mode? - -Access control CORS? - -Swift client retries and backs off for all types of errors - -Implement net error interface? - -type Error interface { - error - Timeout() bool // Is the error a timeout? - Temporary() bool // Is the error temporary? 
-} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs.go deleted file mode 100644 index 34ee15a0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs.go +++ /dev/null @@ -1,83 +0,0 @@ -package rs - -import ( - "errors" - "net/http" - "strconv" - - "github.com/ncw/swift" -) - -// RsConnection is a RackSpace specific wrapper to the core swift library which -// exposes the RackSpace CDN commands via the CDN Management URL interface. -type RsConnection struct { - swift.Connection - cdnUrl string -} - -// manage is similar to the swift storage method, but uses the CDN Management URL for CDN specific calls. -func (c *RsConnection) manage(p swift.RequestOpts) (resp *http.Response, headers swift.Headers, err error) { - p.OnReAuth = func() (string, error) { - if c.cdnUrl == "" { - c.cdnUrl = c.Auth.CdnUrl() - } - if c.cdnUrl == "" { - return "", errors.New("The X-CDN-Management-Url does not exist on the authenticated platform") - } - return c.cdnUrl, nil - } - if c.Authenticated() { - _, err = p.OnReAuth() - if err != nil { - return nil, nil, err - } - } - return c.Connection.Call(c.cdnUrl, p) -} - -// ContainerCDNEnable enables a container for public CDN usage. -// -// Change the default TTL of 259200 seconds (72 hours) by passing in an integer value. -// -// This method can be called again to change the TTL. -func (c *RsConnection) ContainerCDNEnable(container string, ttl int) (swift.Headers, error) { - h := swift.Headers{"X-CDN-Enabled": "true"} - if ttl > 0 { - h["X-TTL"] = strconv.Itoa(ttl) - } - - _, headers, err := c.manage(swift.RequestOpts{ - Container: container, - Operation: "PUT", - ErrorMap: swift.ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return headers, err -} - -// ContainerCDNDisable disables CDN access to a container. -func (c *RsConnection) ContainerCDNDisable(container string) error { - h := swift.Headers{"X-CDN-Enabled": "false"} - - _, _, err := c.manage(swift.RequestOpts{ - Container: container, - Operation: "PUT", - ErrorMap: swift.ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ContainerCDNMeta returns the CDN metadata for a container. -func (c *RsConnection) ContainerCDNMeta(container string) (swift.Headers, error) { - _, headers, err := c.manage(swift.RequestOpts{ - Container: container, - Operation: "HEAD", - ErrorMap: swift.ContainerErrorMap, - NoResponse: true, - Headers: swift.Headers{}, - }) - return headers, err -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go deleted file mode 100644 index 74205154..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// See swift_test.go for requirements to run this test. -package rs_test - -import ( - "os" - "testing" - - "github.com/ncw/swift/rs" -) - -var ( - c rs.RsConnection -) - -const ( - CONTAINER = "GoSwiftUnitTest" - OBJECT = "test_object" - CONTENTS = "12345" - CONTENT_SIZE = int64(len(CONTENTS)) - CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b" -) - -// Test functions are run in order - this one must be first! 
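Putting the three CDN calls together (a sketch, assuming an authenticated RsConnection rc and an illustrative container name):

	if _, err := rc.ContainerCDNEnable("assets", 3600); err != nil { // 1 hour TTL
		fmt.Print(err.Error())
	}
	if headers, err := rc.ContainerCDNMeta("assets"); err == nil {
		fmt.Println("CDN URI:", headers["X-Cdn-Uri"])
	}
	_ = rc.ContainerCDNDisable("assets") // content stays cached until the TTL expires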
-func TestAuthenticate(t *testing.T) { - UserName := os.Getenv("SWIFT_API_USER") - ApiKey := os.Getenv("SWIFT_API_KEY") - AuthUrl := os.Getenv("SWIFT_AUTH_URL") - if UserName == "" || ApiKey == "" || AuthUrl == "" { - t.Fatal("SWIFT_API_USER, SWIFT_API_KEY and SWIFT_AUTH_URL not all set") - } - c = rs.RsConnection{} - c.UserName = UserName - c.ApiKey = ApiKey - c.AuthUrl = AuthUrl - err := c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -// Setup -func TestContainerCreate(t *testing.T) { - err := c.ContainerCreate(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } -} - -func TestCDNEnable(t *testing.T) { - headers, err := c.ContainerCDNEnable(CONTAINER, 0) - if err != nil { - t.Error(err) - } - if _, ok := headers["X-Cdn-Uri"]; !ok { - t.Error("Failed to enable CDN for container") - } -} - -func TestOnReAuth(t *testing.T) { - c2 := rs.RsConnection{} - c2.UserName = c.UserName - c2.ApiKey = c.ApiKey - c2.AuthUrl = c.AuthUrl - _, err := c2.ContainerCDNEnable(CONTAINER, 0) - if err != nil { - t.Fatalf("Failed to reauthenticate: %v", err) - } -} - -func TestCDNMeta(t *testing.T) { - headers, err := c.ContainerCDNMeta(CONTAINER) - if err != nil { - t.Error(err) - } - if _, ok := headers["X-Cdn-Uri"]; !ok { - t.Error("CDN is not enabled") - } -} - -func TestCDNDisable(t *testing.T) { - err := c.ContainerCDNDisable(CONTAINER) // files stick in CDN until TTL expires - if err != nil { - t.Error(err) - } -} - -// Teardown -func TestContainerDelete(t *testing.T) { - err := c.ContainerDelete(CONTAINER) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift.go deleted file mode 100644 index db8eba10..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift.go +++ /dev/null @@ -1,1841 +0,0 @@ -package swift - -import ( - "bufio" - "bytes" - "crypto/md5" - "encoding/json" - "fmt" - "hash" - "io" - "mime" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "sync" - "time" -) - -const ( - DefaultUserAgent = "goswift/1.0" // Default user agent - DefaultRetries = 3 // Default number of retries on token expiry - TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC - UploadTar = "tar" // Data format specifier for Connection.BulkUpload(). - UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload(). - UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload(). - allContainersLimit = 10000 // Number of containers to fetch at once - allObjectsLimit = 10000 // Number objects to fetch at once - allObjectsChanLimit = 1000 // ...when fetching to a channel -) - -// Connection holds the details of the connection to the swift server. -// -// You need to provide UserName, ApiKey and AuthUrl when you create a -// connection then call Authenticate on it. -// -// The auth version in use will be detected from the AuthURL - you can -// override this with the AuthVersion parameter. -// -// If using v2 auth you can also set Region in the Connection -// structure. If you don't set Region you will get the default region -// which may not be what you want. 
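For instance, a v2 connection pinned to a region, with non-default retry and timeout settings, might be configured like this (a sketch; the credentials are placeholders):

	c := swift.Connection{
		UserName:       "user",
		ApiKey:         "key",
		AuthUrl:        "https://identity.api.rackspacecloud.com/v2.0",
		Region:         "LON",             // explicit region instead of the first returned
		AuthVersion:    2,                 // optional: skip autodetection from the AuthUrl
		Retries:        5,                 // default is 3
		ConnectTimeout: 30 * time.Second,  // default is 10s
		Timeout:        120 * time.Second, // data channel timeout, default is 60s
	}
	if err := c.Authenticate(); err != nil {
		panic(err)
	}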
-// -// For reference, some common AuthUrls look like this: -// -// Rackspace US https://auth.api.rackspacecloud.com/v1.0 -// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 -// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 -// Memset Memstore UK https://auth.storage.memset.com/v1.0 -// Memstore v2 https://auth.storage.memset.com/v2.0 -// -// When using Google Appengine you must provide the Connection with an -// appengine-specific Transport: -// -// import ( -// "appengine/urlfetch" -// "fmt" -// "github.com/ncw/swift" -// ) -// -// func handler(w http.ResponseWriter, r *http.Request) { -// ctx := appengine.NewContext(r) -// tr := urlfetch.Transport{Context: ctx} -// c := swift.Connection{ -// UserName: "user", -// ApiKey: "key", -// AuthUrl: "auth_url", -// Transport: tr, -// } -// _ = c.Authenticate() -// containers, _ := c.ContainerNames(nil) -// fmt.Fprintf(w, "containers: %q", containers) -// } -// -// If you don't supply a Transport, one is made which relies on -// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment). -// This means that the connection will respect the HTTP proxy specified by the -// environment variables $HTTP_PROXY and $NO_PROXY. -type Connection struct { - // Parameters - fill these in before calling Authenticate - // They are all optional except UserName, ApiKey and AuthUrl - Domain string // User's domain name - DomainId string // User's domain Id - UserName string // UserName for api - ApiKey string // Key for api access - AuthUrl string // Auth URL - Retries int // Retries on error (default is 3) - UserAgent string // Http User agent (default goswift/1.0) - ConnectTimeout time.Duration // Connect channel timeout (default 10s) - Timeout time.Duration // Data channel timeout (default 60s) - Region string // Region to use eg "LON", "ORD" - default is use first region (V2 auth only) - AuthVersion int // Set to 1 or 2 or leave at 0 for autodetect - Internal bool // Set this to true to use the internal / service network - Tenant string // Name of the tenant (v2 auth only) - TenantId string // Id of the tenant (v2 auth only) - Transport http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine) - // These are filled in after Authenticate is called as are the defaults for above - StorageUrl string - AuthToken string - client *http.Client - Auth Authenticator `json:"-" xml:"-"` // the current authenticator - authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth -} - -// Error - all errors generated by this package are of this type. Other errors -// may be passed on from library functions though. -type Error struct { - StatusCode int // HTTP status code if relevant or 0 if not - Text string -} - -// Error satisfies the error interface. -func (e *Error) Error() string { - return e.Text -} - -// newError makes a new error from a string. -func newError(StatusCode int, Text string) *Error { - return &Error{ - StatusCode: StatusCode, - Text: Text, - } -} - -// newErrorf makes a new error from sprintf parameters. -func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error { - return newError(StatusCode, fmt.Sprintf(Text, Parameters...)) -} - -// errorMap defines http error codes to error mappings. 
-type errorMap map[int]error - -var ( - // Specific Errors you might want to check for equality - BadRequest = newError(400, "Bad Request") - AuthorizationFailed = newError(401, "Authorization Failed") - ContainerNotFound = newError(404, "Container Not Found") - ContainerNotEmpty = newError(409, "Container Not Empty") - ObjectNotFound = newError(404, "Object Not Found") - ObjectCorrupted = newError(422, "Object Corrupted") - TimeoutError = newError(408, "Timeout when reading or writing data") - Forbidden = newError(403, "Operation forbidden") - TooLargeObject = newError(413, "Too Large Object") - - // Mappings for authentication errors - authErrorMap = errorMap{ - 400: BadRequest, - 401: AuthorizationFailed, - 403: Forbidden, - } - - // Mappings for container errors - ContainerErrorMap = errorMap{ - 400: BadRequest, - 403: Forbidden, - 404: ContainerNotFound, - 409: ContainerNotEmpty, - } - - // Mappings for object errors - objectErrorMap = errorMap{ - 400: BadRequest, - 403: Forbidden, - 404: ObjectNotFound, - 413: TooLargeObject, - 422: ObjectCorrupted, - } -) - -// checkClose is used to check the return from Close in a defer -// statement. -func checkClose(c io.Closer, err *error) { - cerr := c.Close() - if *err == nil { - *err = cerr - } -} - -// parseHeaders checks a response for errors and translates into -// standard errors if necessary. -func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error { - if errorMap != nil { - if err, ok := errorMap[resp.StatusCode]; ok { - return err - } - } - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status) - } - return nil -} - -// readHeaders returns a Headers object from the http.Response. -// -// If it receives multiple values for a key (which should never -// happen) it will use the first one -func readHeaders(resp *http.Response) Headers { - headers := Headers{} - for key, values := range resp.Header { - headers[key] = values[0] - } - return headers -} - -// Headers stores HTTP headers (can only have one of each header like Swift). 
-type Headers map[string]string - -// Does an http request using the running timer passed in -func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) { - // Do the request in the background so we can check the timeout - type result struct { - resp *http.Response - err error - } - done := make(chan result, 1) - go func() { - resp, err := c.client.Do(req) - done <- result{resp, err} - }() - // Wait for the read or the timeout - select { - case r := <-done: - return r.resp, r.err - case <-timer.C: - // Kill the connection on timeout so we don't leak sockets or goroutines - cancelRequest(c.Transport, req) - return nil, TimeoutError - } - panic("unreachable") // For Go 1.0 -} - -// Set defaults for any unset values -// -// Call with authLock held -func (c *Connection) setDefaults() { - if c.UserAgent == "" { - c.UserAgent = DefaultUserAgent - } - if c.Retries == 0 { - c.Retries = DefaultRetries - } - if c.ConnectTimeout == 0 { - c.ConnectTimeout = 10 * time.Second - } - if c.Timeout == 0 { - c.Timeout = 60 * time.Second - } - if c.Transport == nil { - c.Transport = &http.Transport{ - // TLSClientConfig: &tls.Config{RootCAs: pool}, - // DisableCompression: true, - Proxy: http.ProxyFromEnvironment, - MaxIdleConnsPerHost: 2048, - } - } - if c.client == nil { - c.client = &http.Client{ - // CheckRedirect: redirectPolicyFunc, - Transport: c.Transport, - } - } -} - -// Authenticate connects to the Swift server. -// -// If you don't call it before calling one of the connection methods -// then it will be called for you on the first access. -func (c *Connection) Authenticate() (err error) { - c.authLock.Lock() - defer c.authLock.Unlock() - return c.authenticate() -} - -// Internal implementation of Authenticate -// -// Call with authLock held -func (c *Connection) authenticate() (err error) { - c.setDefaults() - - // Flush the keepalives connection - if we are - // re-authenticating then stuff has gone wrong - flushKeepaliveConnections(c.Transport) - - if c.Auth == nil { - c.Auth, err = newAuth(c) - if err != nil { - return - } - } - - retries := 1 -again: - var req *http.Request - req, err = c.Auth.Request(c) - if err != nil { - return - } - timer := time.NewTimer(c.ConnectTimeout) - var resp *http.Response - resp, err = c.doTimeoutRequest(timer, req) - if err != nil { - return - } - defer func() { - checkClose(resp.Body, &err) - // Flush the auth connection - we don't want to keep - // it open if keepalives were enabled - flushKeepaliveConnections(c.Transport) - }() - if err = c.parseHeaders(resp, authErrorMap); err != nil { - // Try again for a limited number of times on - // AuthorizationFailed or BadRequest. 
This allows us - // to try some alternate forms of the request - if (err == AuthorizationFailed || err == BadRequest) && retries > 0 { - retries-- - goto again - } - return - } - err = c.Auth.Response(resp) - if err != nil { - return - } - c.StorageUrl = c.Auth.StorageUrl(c.Internal) - c.AuthToken = c.Auth.Token() - if !c.authenticated() { - err = newError(0, "Response didn't have storage url and auth token") - return - } - return -} - -// Get an authToken and url -// -// The Url may be updated if it needed to authenticate using the OnReAuth function -func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) { - c.authLock.Lock() - defer c.authLock.Unlock() - targetUrlOut = targetUrlIn - if !c.authenticated() { - err = c.authenticate() - if err != nil { - return - } - if OnReAuth != nil { - targetUrlOut, err = OnReAuth() - if err != nil { - return - } - } - } - authToken = c.AuthToken - return -} - -// flushKeepaliveConnections is called to flush pending requests after an error. -func flushKeepaliveConnections(transport http.RoundTripper) { - if tr, ok := transport.(interface { - CloseIdleConnections() - }); ok { - tr.CloseIdleConnections() - } -} - -// UnAuthenticate removes the authentication from the Connection. -func (c *Connection) UnAuthenticate() { - c.authLock.Lock() - c.StorageUrl = "" - c.AuthToken = "" - c.authLock.Unlock() -} - -// Authenticated returns a boolean to show if the current connection -// is authenticated. -// -// Doesn't actually check the credentials against the server. -func (c *Connection) Authenticated() bool { - c.authLock.Lock() - defer c.authLock.Unlock() - return c.authenticated() -} - -// Internal version of Authenticated() -// -// Call with authLock held -func (c *Connection) authenticated() bool { - return c.StorageUrl != "" && c.AuthToken != "" -} - -// RequestOpts contains parameters for Connection.storage. -type RequestOpts struct { - Container string - ObjectName string - Operation string - Parameters url.Values - Headers Headers - ErrorMap errorMap - NoResponse bool - Body io.Reader - Retries int - // if set this is called on re-authentication to refresh the targetUrl - OnReAuth func() (string, error) -} - -// Call runs a remote command on the targetUrl, returns a -// response, headers and possible error. -// -// operation is GET, HEAD etc -// container is the name of a container -// Any other parameters (if not None) are added to the targetUrl -// -// Returns a response or an error. If response is returned then -// resp.Body.Close() must be called on it, unless noResponse is set in -// which case the body will be closed in this function -// -// This will Authenticate if necessary, and re-authenticate if it -// receives a 401 error which means the token has expired -// -// This method is exported so extensions can call it. 
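For example, an extension can issue a raw authenticated request much as the rs submodule does (a sketch; the container name and header are illustrative):

	resp, headers, err := c.Call(c.StorageUrl, swift.RequestOpts{
		Container:  "assets",
		Operation:  "HEAD",
		ErrorMap:   swift.ContainerErrorMap,
		NoResponse: true, // the body is closed for us
	})
	_ = resp // with NoResponse set, the interesting data arrives in headers
	if err == nil {
		fmt.Println(headers["X-Container-Object-Count"])
	}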
-func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) { - c.authLock.Lock() - c.setDefaults() - c.authLock.Unlock() - retries := p.Retries - if retries == 0 { - retries = c.Retries - } - var req *http.Request - for { - var authToken string - targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth) - if err != nil { - return - } - - var URL *url.URL - URL, err = url.Parse(targetUrl) - if err != nil { - return - } - if p.Container != "" { - URL.Path += "/" + p.Container - if p.ObjectName != "" { - URL.Path += "/" + p.ObjectName - } - } - if p.Parameters != nil { - URL.RawQuery = p.Parameters.Encode() - } - timer := time.NewTimer(c.ConnectTimeout) - reader := p.Body - if reader != nil { - reader = newWatchdogReader(reader, c.Timeout, timer) - } - req, err = http.NewRequest(p.Operation, URL.String(), reader) - if err != nil { - return - } - if p.Headers != nil { - for k, v := range p.Headers { - req.Header.Add(k, v) - } - } - req.Header.Add("User-Agent", DefaultUserAgent) - req.Header.Add("X-Auth-Token", authToken) - resp, err = c.doTimeoutRequest(timer, req) - if err != nil { - if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 { - retries-- - continue - } - return - } - // Check to see if token has expired - if resp.StatusCode == 401 && retries > 0 { - _ = resp.Body.Close() - c.UnAuthenticate() - retries-- - } else { - break - } - } - - if err = c.parseHeaders(resp, p.ErrorMap); err != nil { - _ = resp.Body.Close() - return nil, nil, err - } - headers = readHeaders(resp) - if p.NoResponse { - err = resp.Body.Close() - if err != nil { - return nil, nil, err - } - } else { - // Cancel the request on timeout - cancel := func() { - cancelRequest(c.Transport, req) - } - // Wrap resp.Body to make it obey an idle timeout - resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel) - } - return -} - -// storage runs a remote command on the storage url, returns a -// response, headers and possible error. -// -// operation is GET, HEAD etc -// container is the name of a container -// Any other parameters (if not None) are added to the storage url -// -// Returns a response or an error. If response is returned then -// resp.Body.Close() must be called on it, unless noResponse is set in -// which case the body will be closed in this function -// -// This will Authenticate if necessary, and re-authenticate if it -// receives a 401 error which means the token has expired -func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) { - p.OnReAuth = func() (string, error) { - return c.StorageUrl, nil - } - c.authLock.Lock() - url := c.StorageUrl - c.authLock.Unlock() - return c.Call(url, p) -} - -// readLines reads the response into an array of strings. 
-// -// Closes the response when done -func readLines(resp *http.Response) (lines []string, err error) { - defer checkClose(resp.Body, &err) - reader := bufio.NewReader(resp.Body) - buffer := bytes.NewBuffer(make([]byte, 0, 128)) - var part []byte - var prefix bool - for { - if part, prefix, err = reader.ReadLine(); err != nil { - break - } - buffer.Write(part) - if !prefix { - lines = append(lines, buffer.String()) - buffer.Reset() - } - } - if err == io.EOF { - err = nil - } - return -} - -// readJson reads the response into the json type passed in -// -// Closes the response when done -func readJson(resp *http.Response, result interface{}) (err error) { - defer checkClose(resp.Body, &err) - decoder := json.NewDecoder(resp.Body) - return decoder.Decode(result) -} - -/* ------------------------------------------------------------ */ - -// ContainersOpts is options for Containers() and ContainerNames() -type ContainersOpts struct { - Limit int // For an integer value n, limits the number of results to at most n values. - Marker string // Given a string value x, return object names greater in value than the specified marker. - EndMarker string // Given a string value x, return container names less in value than the specified marker. - Headers Headers // Any additional HTTP headers - can be nil -} - -// parse the ContainerOpts -func (opts *ContainersOpts) parse() (url.Values, Headers) { - v := url.Values{} - var h Headers - if opts != nil { - if opts.Limit > 0 { - v.Set("limit", strconv.Itoa(opts.Limit)) - } - if opts.Marker != "" { - v.Set("marker", opts.Marker) - } - if opts.EndMarker != "" { - v.Set("end_marker", opts.EndMarker) - } - h = opts.Headers - } - return v, h -} - -// ContainerNames returns a slice of names of containers in this account. -func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) { - v, h := opts.parse() - resp, _, err := c.storage(RequestOpts{ - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - lines, err := readLines(resp) - return lines, err -} - -// Container contains information about a container -type Container struct { - Name string // Name of the container - Count int64 // Number of objects in the container - Bytes int64 // Total number of bytes used in the container -} - -// Containers returns a slice of structures with full information as -// described in Container. 
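The Marker and Limit options drive pagination; ContainersAll below automates the loop, which by hand looks roughly like this (a sketch, assuming an authenticated Connection c):

	opts := swift.ContainersOpts{Limit: 100}
	for {
		names, err := c.ContainerNames(&opts)
		if err != nil {
			panic(err)
		}
		for _, name := range names {
			fmt.Println(name)
		}
		if len(names) < opts.Limit {
			break // short page: nothing left to fetch
		}
		opts.Marker = names[len(names)-1] // resume after the last name seen
	}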
-func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) { - v, h := opts.parse() - v.Set("format", "json") - resp, _, err := c.storage(RequestOpts{ - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - var containers []Container - err = readJson(resp, &containers) - return containers, err -} - -// containersAllOpts makes a copy of opts if set or makes a new one and -// overrides Limit and Marker -func containersAllOpts(opts *ContainersOpts) *ContainersOpts { - var newOpts ContainersOpts - if opts != nil { - newOpts = *opts - } - if newOpts.Limit == 0 { - newOpts.Limit = allContainersLimit - } - newOpts.Marker = "" - return &newOpts -} - -// ContainersAll is like Containers but it returns all the Containers -// -// It calls Containers multiple times using the Marker parameter -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) { - opts = containersAllOpts(opts) - containers := make([]Container, 0) - for { - newContainers, err := c.Containers(opts) - if err != nil { - return nil, err - } - containers = append(containers, newContainers...) - if len(newContainers) < opts.Limit { - break - } - opts.Marker = newContainers[len(newContainers)-1].Name - } - return containers, nil -} - -// ContainerNamesAll is like ContainerNames but it returns all the container names -// -// It calls ContainerNames multiple times using the Marker parameter -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) { - opts = containersAllOpts(opts) - containers := make([]string, 0) - for { - newContainers, err := c.ContainerNames(opts) - if err != nil { - return nil, err - } - containers = append(containers, newContainers...) - if len(newContainers) < opts.Limit { - break - } - opts.Marker = newContainers[len(newContainers)-1] - } - return containers, nil -} - -/* ------------------------------------------------------------ */ - -// ObjectsOpts is options for Objects() and ObjectNames() -type ObjectsOpts struct { - Limit int // For an integer value n, limits the number of results to at most n values. - Marker string // Given a string value x, return object names greater in value than the specified marker. - EndMarker string // Given a string value x, return object names less in value than the specified marker - Prefix string // For a string value x, causes the results to be limited to object names beginning with the substring x. - Path string // For a string value x, return the object names nested in the pseudo path - Delimiter rune // For a character c, return all the object names nested in the container - Headers Headers // Any additional HTTP headers - can be nil -} - -// parse reads values out of ObjectsOpts -func (opts *ObjectsOpts) parse() (url.Values, Headers) { - v := url.Values{} - var h Headers - if opts != nil { - if opts.Limit > 0 { - v.Set("limit", strconv.Itoa(opts.Limit)) - } - if opts.Marker != "" { - v.Set("marker", opts.Marker) - } - if opts.EndMarker != "" { - v.Set("end_marker", opts.EndMarker) - } - if opts.Prefix != "" { - v.Set("prefix", opts.Prefix) - } - if opts.Path != "" { - v.Set("path", opts.Path) - } - if opts.Delimiter != 0 { - v.Set("delimiter", string(opts.Delimiter)) - } - h = opts.Headers - } - return v, h -} - -// ObjectNames returns a slice of names of objects in a given container. 
-func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) { - v, h := opts.parse() - resp, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - return readLines(resp) -} - -// Object contains information about an object -type Object struct { - Name string `json:"name"` // object name - ContentType string `json:"content_type"` // eg application/directory - Bytes int64 `json:"bytes"` // size in bytes - ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server - LastModified time.Time // Last modified time converted to a time.Time - Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" - PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist - SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" -} - -// Objects returns a slice of Object with information about each -// object in the container. -// -// If Delimiter is set in the opts then PseudoDirectory may be set, -// with ContentType 'application/directory'. These are not real -// objects but represent directories of objects which haven't had an -// object created for them. -func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) { - v, h := opts.parse() - v.Set("format", "json") - resp, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - var objects []Object - err = readJson(resp, &objects) - // Convert Pseudo directories and dates - for i := range objects { - object := &objects[i] - if object.SubDir != "" { - object.Name = object.SubDir - object.PseudoDirectory = true - object.ContentType = "application/directory" - } - if object.ServerLastModified != "" { - // 2012-11-11T14:49:47.887250 - // - // Remove fractional seconds if present. This - // then keeps it consistent with Object - // which can only return timestamps accurate - // to 1 second - // - // The TimeFormat will parse fractional - // seconds if desired though - datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0] - object.LastModified, err = time.Parse(TimeFormat, datetime) - if err != nil { - return nil, err - } - } - } - return objects, err -} - -// objectsAllOpts makes a copy of opts if set or makes a new one and -// overrides Limit and Marker -func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts { - var newOpts ObjectsOpts - if opts != nil { - newOpts = *opts - } - if newOpts.Limit == 0 { - newOpts.Limit = Limit - } - newOpts.Marker = "" - return &newOpts -} - -// A closure defined by the caller to iterate through all objects -// -// Call Objects or ObjectNames from here with the *ObjectsOpts passed in -// -// Do whatever is required with the results then return them -type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error) - -// ObjectsWalk is used to iterate through all the objects in chunks as -// returned by Objects or ObjectNames using the Marker and Limit -// parameters in the ObjectsOpts. -// -// Pass in a closure `walkFn` which calls Objects or ObjectNames with -// the *ObjectsOpts passed to it and does something with the results. 
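Concretely, a walk that separates pseudo directories from plain objects might look like this (a sketch; the container name is illustrative):

	opts := &swift.ObjectsOpts{Delimiter: '/'}
	err := c.ObjectsWalk("assets", opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		objects, err := c.Objects("assets", opts)
		for _, object := range objects {
			if object.PseudoDirectory {
				fmt.Println("dir: ", object.Name)
			} else {
				fmt.Println("file:", object.Name)
			}
		}
		return objects, err
	})
	if err != nil {
		panic(err)
	}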
-// -// Errors will be returned from this function -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error { - opts = objectsAllOpts(opts, allObjectsChanLimit) - for { - objects, err := walkFn(opts) - if err != nil { - return err - } - var n int - var last string - switch objects := objects.(type) { - case []string: - n = len(objects) - if n > 0 { - last = objects[len(objects)-1] - } - case []Object: - n = len(objects) - if n > 0 { - last = objects[len(objects)-1].Name - } - default: - panic("Unknown type returned to ObjectsWalk") - } - if n < opts.Limit { - break - } - opts.Marker = last - } - return nil -} - -// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice -// -// It calls Objects multiple times using the Marker parameter -func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) { - objects := make([]Object, 0) - err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { - newObjects, err := c.Objects(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - return objects, err -} - -// ObjectNamesAll is like ObjectNames but it returns all the Objects -// -// It calls ObjectNames multiple times using the Marker parameter -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) { - objects := make([]string, 0) - err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { - newObjects, err := c.ObjectNames(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - return objects, err -} - -// Account contains information about this account. -type Account struct { - BytesUsed int64 // total number of bytes used - Containers int64 // total number of containers - Objects int64 // total number of objects -} - -// getInt64FromHeader is a helper function to decode int64 from header. -func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) { - value := resp.Header.Get(header) - result, err = strconv.ParseInt(value, 10, 64) - if err != nil { - err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err) - } - return -} - -// Account returns info about the account in an Account struct. -func (c *Connection) Account() (info Account, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Operation: "HEAD", - ErrorMap: ContainerErrorMap, - NoResponse: true, - }) - if err != nil { - return - } - // Parse the headers into a dict - // - // {'Accept-Ranges': 'bytes', - // 'Content-Length': '0', - // 'Date': 'Tue, 05 Jul 2011 16:37:06 GMT', - // 'X-Account-Bytes-Used': '316598182', - // 'X-Account-Container-Count': '4', - // 'X-Account-Object-Count': '1433'} - if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil { - return - } - if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil { - return - } - if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil { - return - } - return -} - -// AccountUpdate adds, replaces or remove account metadata. -// -// Add or update keys by mentioning them in the Headers. -// -// Remove keys by setting them to an empty string. 
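For example (a sketch; the metadata key is illustrative):

	// Set one piece of account metadata, then read it back.
	if err := c.AccountUpdate(swift.Metadata{"owner": "ops"}.AccountHeaders()); err != nil {
		panic(err)
	}
	_, headers, err := c.Account()
	if err == nil {
		fmt.Println(headers.AccountMetadata()["owner"])
	}
	// An empty value removes the key again.
	_ = c.AccountUpdate(swift.Metadata{"owner": ""}.AccountHeaders())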
-func (c *Connection) AccountUpdate(h Headers) error { - _, _, err := c.storage(RequestOpts{ - Operation: "POST", - ErrorMap: ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ContainerCreate creates a container. -// -// If you don't want to add Headers just pass in nil -// -// No error is returned if it already exists, but the metadata, if any, will be updated. -func (c *Connection) ContainerCreate(container string, h Headers) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "PUT", - ErrorMap: ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ContainerDelete deletes a container. -// -// May return ContainerNotFound or ContainerNotEmpty -func (c *Connection) ContainerDelete(container string) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "DELETE", - ErrorMap: ContainerErrorMap, - NoResponse: true, - }) - return err -} - -// Container returns info about a single container including any -// metadata in the headers. -func (c *Connection) Container(container string) (info Container, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Container: container, - Operation: "HEAD", - ErrorMap: ContainerErrorMap, - NoResponse: true, - }) - if err != nil { - return - } - // Parse the headers into the struct - info.Name = container - if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil { - return - } - if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil { - return - } - return -} - -// ContainerUpdate adds, replaces or removes container metadata. -// -// Add or update keys by mentioning them in the Metadata. -// -// Remove keys by setting them to an empty string. -// -// Container metadata can only be read with Container() not with Containers(). -func (c *Connection) ContainerUpdate(container string, h Headers) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "POST", - ErrorMap: ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ------------------------------------------------------------ - -// ObjectCreateFile represents a swift object open for writing -type ObjectCreateFile struct { - checkHash bool // whether we are checking the hash - pipeReader *io.PipeReader // pipe for the caller to use - pipeWriter *io.PipeWriter - hash hash.Hash // hash being built up as we go along - done chan struct{} // signals when the upload has finished - resp *http.Response // valid when done has signalled - err error // ditto - headers Headers // ditto -} - -// Write bytes to the object - see io.Writer -func (file *ObjectCreateFile) Write(p []byte) (n int, err error) { - n, err = file.pipeWriter.Write(p) - if err == io.ErrClosedPipe { - if file.err != nil { - return 0, file.err - } - return 0, newError(500, "Write on closed file") - } - if err == nil && file.checkHash { - _, _ = file.hash.Write(p) - } - return -} - -// Close the object and check the md5sum if it was required. -// -// Also returns any other errors from the server (eg container not -// found) so it is very important to check the errors on this method. 
-func (file *ObjectCreateFile) Close() error { - // Close the body - err := file.pipeWriter.Close() - if err != nil { - return err - } - - // Wait for the HTTP operation to complete - <-file.done - - // Check errors - if file.err != nil { - return file.err - } - if file.checkHash { - receivedMd5 := strings.ToLower(file.headers["Etag"]) - calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - return ObjectCorrupted - } - } - return nil -} - -// Check it satisfies the interface -var _ io.WriteCloser = &ObjectCreateFile{} - -// objectPutHeaders create a set of headers for a PUT -// -// It guesses the contentType from the objectName if it isn't set -// -// checkHash may be changed -func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers { - if contentType == "" { - contentType = mime.TypeByExtension(path.Ext(objectName)) - if contentType == "" { - contentType = "application/octet-stream" - } - } - // Meta stuff - extraHeaders := map[string]string{ - "Content-Type": contentType, - } - for key, value := range h { - extraHeaders[key] = value - } - if Hash != "" { - extraHeaders["Etag"] = Hash - *checkHash = false // the server will do it - } - return extraHeaders -} - -// ObjectCreate creates or updates the object in the container. It -// returns an io.WriteCloser you should write the contents to. You -// MUST call Close() on it and you MUST check the error return from -// Close(). -// -// If checkHash is True then it will calculate the MD5 Hash of the -// file as it is being uploaded and check it against that returned -// from the server. If it is wrong then it will return -// ObjectCorrupted on Close() -// -// If you know the MD5 hash of the object ahead of time then set the -// Hash parameter and it will be sent to the server (as an Etag -// header) and the server will check the MD5 itself after the upload, -// and this will return ObjectCorrupted on Close() if it is incorrect. -// -// If you don't want any error protection (not recommended) then set -// checkHash to false and Hash to "". -// -// If contentType is set it will be used, otherwise one will be -// guessed from objectName using mime.TypeByExtension -func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) { - extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) - pipeReader, pipeWriter := io.Pipe() - file = &ObjectCreateFile{ - hash: md5.New(), - checkHash: checkHash, - pipeReader: pipeReader, - pipeWriter: pipeWriter, - done: make(chan struct{}), - } - // Run the PUT in the background piping it data - go func() { - file.resp, file.headers, file.err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "PUT", - Headers: extraHeaders, - Body: pipeReader, - NoResponse: true, - ErrorMap: objectErrorMap, - }) - // Signal finished - pipeReader.Close() - close(file.done) - }() - return -} - -// ObjectPut creates or updates the path in the container from -// contents. contents should be an open io.Reader which will have all -// its contents read. -// -// This is a low level interface. -// -// If checkHash is True then it will calculate the MD5 Hash of the -// file as it is being uploaded and check it against that returned -// from the server. If it is wrong then it will return -// ObjectCorrupted. 
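A sketch of the streaming path (assuming an authenticated Connection c and an arbitrary io.Reader src; the names are illustrative):

	f, err := c.ObjectCreate("assets", "big.bin", true, "", "application/octet-stream", nil)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(f, src); err != nil {
		_ = f.Close()
		panic(err)
	}
	if err := f.Close(); err == swift.ObjectCorrupted {
		panic("upload corrupted in transit") // server and client MD5s disagreed
	} else if err != nil {
		panic(err)
	}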
-// -// If you know the MD5 hash of the object ahead of time then set the -// Hash parameter and it will be sent to the server (as an Etag -// header) and the server will check the MD5 itself after the upload, -// and this will return ObjectCorrupted if it is incorrect. -// -// If you don't want any error protection (not recommended) then set -// checkHash to false and Hash to "". -// -// If contentType is set it will be used, otherwise one will be -// guessed from objectName using mime.TypeByExtension -func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) { - extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) - hash := md5.New() - var body io.Reader = contents - if checkHash { - body = io.TeeReader(contents, hash) - } - _, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "PUT", - Headers: extraHeaders, - Body: body, - NoResponse: true, - ErrorMap: objectErrorMap, - }) - if err != nil { - return - } - if checkHash { - receivedMd5 := strings.ToLower(headers["Etag"]) - calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - err = ObjectCorrupted - return - } - } - return -} - -// ObjectPutBytes creates an object from a []byte in a container. -// -// This is a simplified interface which checks the MD5. -func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { - buf := bytes.NewBuffer(contents) - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) - return -} - -// ObjectPutString creates an object from a string in a container. -// -// This is a simplified interface which checks the MD5 -func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { - buf := strings.NewReader(contents) - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) - return -} - -// ObjectOpenFile represents a swift object open for reading -type ObjectOpenFile struct { - connection *Connection // stored copy of Connection used in Open - container string // stored copy of container used in Open - objectName string // stored copy of objectName used in Open - headers Headers // stored copy of headers used in Open - resp *http.Response // http connection - body io.Reader // read data from this - checkHash bool // true if checking MD5 - hash hash.Hash // currently accumulating MD5 - bytes int64 // number of bytes read on this connection - eof bool // whether we have read end of file - pos int64 // current position when reading - lengthOk bool // whether length is valid - length int64 // length of the object if read - seeked bool // whether we have seeked this file or not -} - -// Read bytes from the object - see io.Reader -func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { - n, err = file.body.Read(p) - file.bytes += int64(n) - file.pos += int64(n) - if err == io.EOF { - file.eof = true - } - return -} - -// Seek sets the offset for the next Read to offset, interpreted -// according to whence: 0 means relative to the origin of the file, 1 -// means relative to the current offset, and 2 means relative to the -// end. Seek returns the new offset and an Error, if any. -// -// Seek uses HTTP Range headers which, if the file pointer is moved, -// will involve reopening the HTTP connection. 
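For the ObjectPut family above, any io.Reader works as the source because the data is teed through an MD5 while uploading. A sketch with a hypothetical local file and connection:

package main

import (
	"log"
	"os"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{UserName: "demo", ApiKey: "demo-key", AuthUrl: "http://localhost:5324/v1.0"} // hypothetical
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	in, err := os.Open("backup.tar") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()
	// checkHash=true: contents are hashed as they stream and the result is
	// compared against the Etag the server sends back.
	if _, err := c.ObjectPut("demo-container", "backup.tar", in, true, "", "application/x-tar", nil); err != nil {
		log.Fatal(err) // swift.ObjectCorrupted on a mismatch
	}
}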
-// -// Note that you can't seek to the end of a file or beyond; HTTP Range -// requests don't support the file pointer being outside the data, -// unlike os.File -// -// Seek(0, 1) will return the current file pointer. -func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { - switch whence { - case 0: // relative to start - newPos = offset - case 1: // relative to current - newPos = file.pos + offset - case 2: // relative to end - if !file.lengthOk { - return file.pos, newError(0, "Length of file unknown so can't seek from end") - } - newPos = file.length + offset - default: - panic("Unknown whence in ObjectOpenFile.Seek") - } - // If at correct position (quite likely), do nothing - if newPos == file.pos { - return - } - // Close the file... - file.seeked = true - err = file.Close() - if err != nil { - return - } - // ...and re-open with a Range header - if file.headers == nil { - file.headers = Headers{} - } - if newPos > 0 { - file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos) - } else { - delete(file.headers, "Range") - } - newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers) - if err != nil { - return - } - // Update the file - file.resp = newFile.resp - file.body = newFile.body - file.checkHash = false - file.pos = newPos - return -} - -// Length gets the object's content length either from a cached copy or -// from the server. -func (file *ObjectOpenFile) Length() (int64, error) { - if !file.lengthOk { - info, _, err := file.connection.Object(file.container, file.objectName) - file.length = info.Bytes - file.lengthOk = (err == nil) - return file.length, err - } - return file.length, nil -} - -// Close closes the object and checks the length and md5sum if it was -// required and all the object was read. -func (file *ObjectOpenFile) Close() (err error) { - // Close the body at the end - defer checkClose(file.resp.Body, &err) - - // If not end of file or seeked then can't check anything - if !file.eof || file.seeked { - return - } - - // Check the MD5 sum if requested - if file.checkHash { - receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag")) - calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - err = ObjectCorrupted - return - } - } - - // Check to see that we read the correct number of bytes - if file.lengthOk && file.length != file.bytes { - err = ObjectCorrupted - return - } - return -} - -// Check it satisfies the interfaces -var _ io.ReadCloser = &ObjectOpenFile{} -var _ io.Seeker = &ObjectOpenFile{} - -// ObjectOpen returns an ObjectOpenFile for reading the contents of -// the object. This satisfies the io.ReadCloser and the io.Seeker -// interfaces. -// -// You must call Close() on contents when finished -// -// Returns the headers of the response. -// -// If checkHash is true then it will calculate the md5sum of the file -// as it is being received and check it against that returned from the -// server. If it is wrong then it will return ObjectCorrupted. It -// will also check the length returned. No checking will be done if -// you don't read all the contents. -// -// Note that objects with X-Object-Manifest set won't ever have their -// md5sums checked as the md5sum reported on the object is actually -// the md5sum of the md5sums of the parts. This isn't very helpful to -// detect a corrupted download as the sizes of the parts aren't known -// without doing more operations.
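Given the Range-based reopen described above, a Seek followed by Read behaves like resuming a download. A sketch with hypothetical names; note that seeking disables hash checking:

package main

import (
	"io/ioutil"
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{UserName: "demo", ApiKey: "demo-key", AuthUrl: "http://localhost:5324/v1.0"} // hypothetical
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	f, _, err := c.ObjectOpen("demo-container", "big.txt", false, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// Skipping ahead closes the first GET and reopens the object with a
	// "Range: bytes=1024-" header behind the scenes.
	if _, err := f.Seek(1024, 0); err != nil {
		log.Fatal(err)
	}
	rest, err := ioutil.ReadAll(f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes from offset 1024", len(rest))
}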
If you want to ensure integrity of -// an object with a manifest then you will need to download everything -// in the manifest separately. -// -// headers["Content-Type"] will give the content type if desired. -func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "GET", - ErrorMap: objectErrorMap, - Headers: h, - }) - if err != nil { - return - } - // Can't check MD5 on an object with X-Object-Manifest set - if checkHash && headers["X-Object-Manifest"] != "" { - // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) - checkHash = false - } - file = &ObjectOpenFile{ - connection: c, - container: container, - objectName: objectName, - headers: h, - resp: resp, - checkHash: checkHash, - body: resp.Body, - } - if checkHash { - file.hash = md5.New() - file.body = io.TeeReader(resp.Body, file.hash) - } - // Read Content-Length - file.length, err = getInt64FromHeader(resp, "Content-Length") - file.lengthOk = (err == nil) - return -} - -// ObjectGet gets the object into the io.Writer contents. -// -// Returns the headers of the response. -// -// If checkHash is true then it will calculate the md5sum of the file -// as it is being received and check it against that returned from the -// server. If it is wrong then it will return ObjectCorrupted. -// -// headers["Content-Type"] will give the content type if desired. -func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) { - file, headers, err := c.ObjectOpen(container, objectName, checkHash, h) - if err != nil { - return - } - defer checkClose(file, &err) - _, err = io.Copy(contents, file) - return -} - -// ObjectGetBytes returns an object as a []byte. -// -// This is a simplified interface which checks the MD5. -func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) { - var buf bytes.Buffer - _, err = c.ObjectGet(container, objectName, &buf, true, nil) - contents = buf.Bytes() - return -} - -// ObjectGetString returns an object as a string. -// -// This is a simplified interface which checks the MD5. -func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) { - var buf bytes.Buffer - _, err = c.ObjectGet(container, objectName, &buf, true, nil) - contents = buf.String() - return -} - -// ObjectDelete deletes the object. -// -// May return ObjectNotFound if the object isn't found. -func (c *Connection) ObjectDelete(container string, objectName string) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "DELETE", - ErrorMap: objectErrorMap, - }) - return err -} - -// parseResponseStatus parses a string like "200 OK" and returns an Error. -// -// For status codes between 200 and 299, this returns nil. -func parseResponseStatus(resp string, errorMap errorMap) error { - code := 0 - reason := resp - t := strings.SplitN(resp, " ", 2) - if len(t) == 2 { - ncode, err := strconv.Atoi(t[0]) - if err == nil { - code = ncode - reason = t[1] - } - } - if errorMap != nil { - if err, ok := errorMap[code]; ok { - return err - } - } - if 200 <= code && code <= 299 { - return nil - } - return newError(code, reason) -} - -// BulkDeleteResult stores results of BulkDelete().
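The download helpers above stream into any io.Writer, which keeps large downloads off the heap. A sketch writing straight to a hypothetical local file:

package main

import (
	"log"
	"os"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{UserName: "demo", ApiKey: "demo-key", AuthUrl: "http://localhost:5324/v1.0"} // hypothetical
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	out, err := os.Create("restore.tar") // hypothetical destination
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	// With checkHash=true the MD5 is verified once the whole body is read.
	if _, err := c.ObjectGet("demo-container", "backup.tar", out, true, nil); err != nil {
		log.Fatal(err)
	}
}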
-// -// Individual errors may (or may not) be returned by Errors. -// Errors is a map whose keys are a full path of where the object was -// to be deleted, and whose values are Error objects. A full path of -// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". -type BulkDeleteResult struct { - NumberNotFound int64 // # of objects not found. - NumberDeleted int64 // # of deleted objects. - Errors map[string]error // Mapping between object name and an error. - Headers Headers // Response HTTP headers. -} - -// BulkDelete deletes multiple objectNames from container in one operation. -// -// Some servers may not accept bulk-delete requests since bulk-delete is -// an optional feature of swift - these will return the Forbidden error. -// -// See also: -// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html -// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html -func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { - var buffer bytes.Buffer - for _, s := range objectNames { - buffer.WriteString(fmt.Sprintf("/%s/%s\n", container, - url.QueryEscape(s))) - } - resp, headers, err := c.storage(RequestOpts{ - Operation: "DELETE", - Parameters: url.Values{"bulk-delete": []string{"1"}}, - Headers: Headers{ - "Accept": "application/json", - "Content-Type": "text/plain", - }, - ErrorMap: ContainerErrorMap, - Body: &buffer, - }) - if err != nil { - return - } - var jsonResult struct { - NotFound int64 `json:"Number Not Found"` - Status string `json:"Response Status"` - Errors [][]string - Deleted int64 `json:"Number Deleted"` - } - err = readJson(resp, &jsonResult) - if err != nil { - return - } - - err = parseResponseStatus(jsonResult.Status, objectErrorMap) - result.NumberNotFound = jsonResult.NotFound - result.NumberDeleted = jsonResult.Deleted - result.Headers = headers - el := make(map[string]error, len(jsonResult.Errors)) - for _, t := range jsonResult.Errors { - if len(t) != 2 { - continue - } - el[t[0]] = parseResponseStatus(t[1], objectErrorMap) - } - result.Errors = el - return -} - -// BulkUploadResult stores results of BulkUpload(). -// -// Individual errors may (or may not) be returned by Errors. -// Errors is a map whose keys are a full path of where an object was -// to be created, and whose values are Error objects. A full path of -// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". -type BulkUploadResult struct { - NumberCreated int64 // # of created objects. - Errors map[string]error // Mapping between object name and an error. - Headers Headers // Response HTTP headers. -} - -// BulkUpload uploads multiple files in one operation. -// -// uploadPath can be empty, a container name, or a pseudo-directory -// within a container. If uploadPath is empty, new containers may be -// automatically created. -// -// Files are read from dataStream. The format of the stream is specified -// by the format parameter. Available formats are: -// * UploadTar - Plain tar stream. -// * UploadTarGzip - Gzip compressed tar stream. -// * UploadTarBzip2 - Bzip2 compressed tar stream. -// -// Some servers may not accept bulk-upload requests since bulk-upload is -// an optional feature of swift - these will return the Forbidden error. 
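In use, a BulkDelete caller has to check both the overall error and the per-object Errors map. A sketch with hypothetical object names:

package main

import (
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{UserName: "demo", ApiKey: "demo-key", AuthUrl: "http://localhost:5324/v1.0"} // hypothetical
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	result, err := c.BulkDelete("demo-container", []string{"a.txt", "b.txt", "c.txt"})
	if err == swift.Forbidden {
		log.Fatal("no bulk-delete middleware; fall back to per-object ObjectDelete")
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted=%d notfound=%d", result.NumberDeleted, result.NumberNotFound)
	for path, perr := range result.Errors { // per-object failures, keyed by full path
		log.Printf("%s: %v", path, perr)
	}
}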
-// -// See also: -// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html -// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html -func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) { - extraHeaders := Headers{"Accept": "application/json"} - for key, value := range h { - extraHeaders[key] = value - } - // The following code abuses Container parameter intentionally. - // The best fix might be to rename Container to UploadPath. - resp, headers, err := c.storage(RequestOpts{ - Container: uploadPath, - Operation: "PUT", - Parameters: url.Values{"extract-archive": []string{format}}, - Headers: extraHeaders, - ErrorMap: ContainerErrorMap, - Body: dataStream, - }) - if err != nil { - return - } - // Detect old servers which don't support this feature - if headers["Content-Type"] != "application/json" { - err = Forbidden - return - } - var jsonResult struct { - Created int64 `json:"Number Files Created"` - Status string `json:"Response Status"` - Errors [][]string - } - err = readJson(resp, &jsonResult) - if err != nil { - return - } - - err = parseResponseStatus(jsonResult.Status, objectErrorMap) - result.NumberCreated = jsonResult.Created - result.Headers = headers - el := make(map[string]error, len(jsonResult.Errors)) - for _, t := range jsonResult.Errors { - if len(t) != 2 { - continue - } - el[t[0]] = parseResponseStatus(t[1], objectErrorMap) - } - result.Errors = el - return -} - -// Object returns info about a single object including any metadata in the header. -// -// May return ObjectNotFound. -// -// Use headers.ObjectMetadata() to read the metadata in the Headers. -func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "HEAD", - ErrorMap: objectErrorMap, - NoResponse: true, - }) - if err != nil { - return - } - // Parse the headers into the struct - // HTTP/1.1 200 OK - // Date: Thu, 07 Jun 2010 20:59:39 GMT - // Server: Apache - // Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT - // ETag: 8a964ee2a5e88be344f36c22562a6486 - // Content-Length: 512000 - // Content-Type: text/plain; charset=UTF-8 - // X-Object-Meta-Meat: Bacon - // X-Object-Meta-Fruit: Bacon - // X-Object-Meta-Veggie: Bacon - // X-Object-Meta-Dairy: Bacon - info.Name = objectName - info.ContentType = resp.Header.Get("Content-Type") - if resp.Header.Get("Content-Length") != "" { - if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil { - return - } - } - info.ServerLastModified = resp.Header.Get("Last-Modified") - if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil { - return - } - info.Hash = resp.Header.Get("Etag") - return -} - -// ObjectUpdate adds, replaces or removes object metadata. -// -// Add or Update keys by mentioning them in the Metadata. Use -// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your -// Metadata to and from normal HTTP headers. -// -// This removes all metadata previously added to the object and -// replaces it with that passed in, so to delete keys, just don't -// mention them in the headers you pass in. -// -// Object metadata can only be read with Object() not with Objects().
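A sketch of driving BulkUpload with an in-memory tar stream, using the package's UploadTar format mentioned above; names are hypothetical and error checks on the in-memory tar writes are elided for brevity:

package main

import (
	"archive/tar"
	"bytes"
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{UserName: "demo", ApiKey: "demo-key", AuthUrl: "http://localhost:5324/v1.0"} // hypothetical
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	// Build a one-file tar stream in memory.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello")
	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0600, Size: int64(len(body))})
	tw.Write(body)
	tw.Close()
	// Extracting under "demo-container" creates demo-container/hello.txt;
	// servers without the middleware answer with Forbidden.
	result, err := c.BulkUpload("demo-container", &buf, swift.UploadTar, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created %d objects", result.NumberCreated)
}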
-// -// This can also be used to set headers not already assigned such as -// X-Delete-At or X-Delete-After for expiring objects. -// -// You cannot use this to change any of the object's other headers -// such as Content-Type, ETag, etc. -// -// Refer to copying an object when you need to update metadata or -// other headers such as Content-Type or CORS headers. -// -// May return ObjectNotFound. -func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "POST", - ErrorMap: objectErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ObjectCopy does a server-side copy of an object to a new position -// -// All metadata is preserved. If metadata is set in the headers then -// it overrides the old metadata on the copied object. -// -// The destination container must exist before the copy. -// -// You can use this to copy an object to itself - this is the only way -// to update the content type of an object. -func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) { - // Meta stuff - extraHeaders := map[string]string{ - "Destination": dstContainer + "/" + dstObjectName, - } - for key, value := range h { - extraHeaders[key] = value - } - _, headers, err = c.storage(RequestOpts{ - Container: srcContainer, - ObjectName: srcObjectName, - Operation: "COPY", - ErrorMap: objectErrorMap, - NoResponse: true, - Headers: extraHeaders, - }) - return -} - -// ObjectMove does a server-side move of an object to a new position -// -// This is a convenience method which calls ObjectCopy then ObjectDelete -// -// All metadata is preserved. -// -// The destination container must exist before the copy. -func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) { - _, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil) - if err != nil { - return - } - return c.ObjectDelete(srcContainer, srcObjectName) -} - -// ObjectUpdateContentType updates the content type of an object -// -// This is a convenience method which calls ObjectCopy -// -// All other metadata is preserved. -func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) { - h := Headers{"Content-Type": contentType} - _, err = c.ObjectCopy(container, objectName, container, objectName, h) - return -} - -// ------------------------------------------------------------ - -// VersionContainerCreate is a helper method for creating and enabling version controlled containers. -// -// It builds the current object container, the non-current object version container, and enables versioning. -// -// If the server doesn't support versioning then it will return -// Forbidden; however, it will have created both the containers at that point. -func (c *Connection) VersionContainerCreate(current, version string) error { - if err := c.ContainerCreate(version, nil); err != nil { - return err - } - if err := c.ContainerCreate(current, nil); err != nil { - return err - } - if err := c.VersionEnable(current, version); err != nil { - return err - } - return nil -} - -// VersionEnable enables versioning on the current container with version as the tracking container.
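A sketch of the versioning workflow these helpers describe, with hypothetical container names; note the Forbidden case documented above still leaves both containers created:

package main

import (
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{UserName: "demo", ApiKey: "demo-key", AuthUrl: "http://localhost:5324/v1.0"} // hypothetical
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	// Track old versions of objects in "docs" inside "docs-versions".
	err := c.VersionContainerCreate("docs", "docs-versions")
	if err == swift.Forbidden {
		// Both containers exist at this point; the server just ignored
		// X-Versions-Location, i.e. versioning is unsupported.
		log.Fatal("server does not support versioned containers")
	}
	if err != nil {
		log.Fatal(err)
	}
	versions, err := c.VersionObjectList("docs-versions", "report.txt") // hypothetical object
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d old versions of report.txt", len(versions))
}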
-// -// May return Forbidden if this isn't supported by the server -func (c *Connection) VersionEnable(current, version string) error { - h := Headers{"X-Versions-Location": version} - if err := c.ContainerUpdate(current, h); err != nil { - return err - } - // Check to see if the header was set properly - _, headers, err := c.Container(current) - if err != nil { - return err - } - // If failed to set versions header, return Forbidden as the server doesn't support this - if headers["X-Versions-Location"] != version { - return Forbidden - } - return nil -} - -// VersionDisable disables versioning on the current container. -func (c *Connection) VersionDisable(current string) error { - h := Headers{"X-Versions-Location": ""} - if err := c.ContainerUpdate(current, h); err != nil { - return err - } - return nil -} - -// VersionObjectList returns a list of older versions of the object. -// -// Objects are returned in the format <length><object_name>/<timestamp> -func (c *Connection) VersionObjectList(version, object string) ([]string, error) { - opts := &ObjectsOpts{ - // <3-character zero-padded hexadecimal length><object_name>/ - Prefix: fmt.Sprintf("%03x", len(object)) + object + "/", - } - return c.ObjectNames(version, opts) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go deleted file mode 100644 index e8b1f437..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go +++ /dev/null @@ -1,409 +0,0 @@ -// This tests the swift package internals -// -// It does not require access to a swift server -// -// FIXME need to add more tests and to check URLs and parameters -package swift - -import ( - "fmt" - "io" - "net" - "net/http" - "testing" - - // "net/http/httputil" - // "os" -) - -const ( - TEST_ADDRESS = "localhost:5324" - AUTH_URL = "http://" + TEST_ADDRESS + "/v1.0" - PROXY_URL = "http://" + TEST_ADDRESS + "/proxy" - USERNAME = "test" - APIKEY = "apikey" - AUTH_TOKEN = "token" -) - -// Globals -var ( - server *SwiftServer - c *Connection -) - -// SwiftServer implements a test swift server -type SwiftServer struct { - t *testing.T - checks []*Check -} - -// Used to check and reply to HTTP transactions -type Check struct { - in Headers - out Headers - rx *string - tx *string - err *Error - url *string -} - -// Add an in check -func (check *Check) In(in Headers) *Check { - check.in = in - return check -} - -// Add an out check -func (check *Check) Out(out Headers) *Check { - check.out = out - return check -} - -// Add an Error check -func (check *Check) Error(StatusCode int, Text string) *Check { - check.err = newError(StatusCode, Text) - return check -} - -// Add an rx check -func (check *Check) Rx(rx string) *Check { - check.rx = &rx - return check -} - -// Add a tx check -func (check *Check) Tx(tx string) *Check { - check.tx = &tx - return check -} - -// Add a URL check -func (check *Check) Url(url string) *Check { - check.url = &url - return check -} - -// Add a check -func (s *SwiftServer) AddCheck(t *testing.T) *Check { - server.t = t - check := &Check{ - in: Headers{}, - out: Headers{}, - err: nil, - } - s.checks = append(s.checks, check) - return check -} - -// Responds to a request -func (s *SwiftServer) Respond(w http.ResponseWriter, r *http.Request) { - if len(s.checks) < 1 { - s.t.Fatal("Unexpected http transaction") - } - check := s.checks[0] - s.checks = s.checks[1:] - - // Check
URL - if check.url != nil && *check.url != r.URL.String() { - s.t.Errorf("Expecting URL %q but got %q", *check.url, r.URL) - } - - // Check headers - for k, v := range check.in { - actual := r.Header.Get(k) - if actual != v { - s.t.Errorf("Expecting header %q=%q but got %q", k, v, actual) - } - } - // Write output headers - h := w.Header() - for k, v := range check.out { - h.Set(k, v) - } - // Return an error if required - if check.err != nil { - http.Error(w, check.err.Text, check.err.StatusCode) - } else { - if check.tx != nil { - _, err := w.Write([]byte(*check.tx)) - if err != nil { - s.t.Error("Write failed", err) - } - } - } -} - -// Checks to see all responses are used up -func (s *SwiftServer) Finished() { - if len(s.checks) > 0 { - s.t.Error("Unused checks", s.checks) - } -} - -func handle(w http.ResponseWriter, r *http.Request) { - // out, _ := httputil.DumpRequest(r, true) - // os.Stdout.Write(out) - server.Respond(w, r) -} - -func NewSwiftServer() *SwiftServer { - server := &SwiftServer{} - http.HandleFunc("/", handle) - go http.ListenAndServe(TEST_ADDRESS, nil) - fmt.Print("Waiting for server to start ") - for { - fmt.Print(".") - conn, err := net.Dial("tcp", TEST_ADDRESS) - if err == nil { - conn.Close() - fmt.Println(" Started") - break - } - } - return server -} - -func init() { - server = NewSwiftServer() - c = &Connection{ - UserName: USERNAME, - ApiKey: APIKEY, - AuthUrl: AUTH_URL, - } -} - -// Check the error is a swift error -func checkError(t *testing.T, err error, StatusCode int, Text string) { - if err == nil { - t.Fatal("No error returned") - } - err2, ok := err.(*Error) - if !ok { - t.Fatal("Bad error type") - } - if err2.StatusCode != StatusCode { - t.Fatalf("Bad status code, expecting %d got %d", StatusCode, err2.StatusCode) - } - if err2.Text != Text { - t.Fatalf("Bad error string, expecting %q got %q", Text, err2.Text) - } -} - -// FIXME copied from swift_test.go -func compareMaps(t *testing.T, a, b map[string]string) { - if len(a) != len(b) { - t.Error("Maps different sizes", a, b) - } - for ka, va := range a { - if vb, ok := b[ka]; !ok || va != vb { - t.Error("Difference in key", ka, va, b[ka]) - } - } - for kb, vb := range b { - if va, ok := a[kb]; !ok || vb != va { - t.Error("Difference in key", kb, vb, a[kb]) - } - } -} - -func TestInternalError(t *testing.T) { - e := newError(404, "Not Found!") - if e.StatusCode != 404 || e.Text != "Not Found!" { - t.Fatal("Bad error") - } - if e.Error() != "Not Found!" 
{ - t.Fatal("Bad error") - } - -} - -func testCheckClose(c io.Closer, e error) (err error) { - err = e - defer checkClose(c, &err) - return -} - -// Make a closer which returns the error of our choice -type myCloser struct { - err error -} - -func (c *myCloser) Close() error { - return c.err -} - -func TestInternalCheckClose(t *testing.T) { - if testCheckClose(&myCloser{nil}, nil) != nil { - t.Fatal("bad 1") - } - if testCheckClose(&myCloser{nil}, ObjectCorrupted) != ObjectCorrupted { - t.Fatal("bad 2") - } - if testCheckClose(&myCloser{ObjectNotFound}, nil) != ObjectNotFound { - t.Fatal("bad 3") - } - if testCheckClose(&myCloser{ObjectNotFound}, ObjectCorrupted) != ObjectCorrupted { - t.Fatal("bad 4") - } -} - -func TestInternalParseHeaders(t *testing.T) { - resp := &http.Response{StatusCode: 200} - if c.parseHeaders(resp, nil) != nil { - t.Error("Bad 1") - } - if c.parseHeaders(resp, authErrorMap) != nil { - t.Error("Bad 1") - } - - resp = &http.Response{StatusCode: 299} - if c.parseHeaders(resp, nil) != nil { - t.Error("Bad 1") - } - - resp = &http.Response{StatusCode: 199, Status: "BOOM"} - checkError(t, c.parseHeaders(resp, nil), 199, "HTTP Error: 199: BOOM") - - resp = &http.Response{StatusCode: 300, Status: "BOOM"} - checkError(t, c.parseHeaders(resp, nil), 300, "HTTP Error: 300: BOOM") - - resp = &http.Response{StatusCode: 404, Status: "BOOM"} - checkError(t, c.parseHeaders(resp, nil), 404, "HTTP Error: 404: BOOM") - if c.parseHeaders(resp, ContainerErrorMap) != ContainerNotFound { - t.Error("Bad 1") - } - if c.parseHeaders(resp, objectErrorMap) != ObjectNotFound { - t.Error("Bad 1") - } -} - -func TestInternalReadHeaders(t *testing.T) { - resp := &http.Response{Header: http.Header{}} - compareMaps(t, readHeaders(resp), Headers{}) - - resp = &http.Response{Header: http.Header{ - "one": []string{"1"}, - "two": []string{"2"}, - }} - compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"}) - - // FIXME this outputs a log which we should test and check - resp = &http.Response{Header: http.Header{ - "one": []string{"1", "11", "111"}, - "two": []string{"2"}, - }} - compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"}) -} - -func TestInternalStorage(t *testing.T) { - // FIXME -} - -// ------------------------------------------------------------ - -func TestInternalAuthenticate(t *testing.T) { - server.AddCheck(t).In(Headers{ - "User-Agent": DefaultUserAgent, - "X-Auth-Key": APIKEY, - "X-Auth-User": USERNAME, - }).Out(Headers{ - "X-Storage-Url": PROXY_URL, - "X-Auth-Token": AUTH_TOKEN, - }).Url("/v1.0") - defer server.Finished() - - err := c.Authenticate() - if err != nil { - t.Fatal(err) - } - if c.StorageUrl != PROXY_URL { - t.Error("Bad storage url") - } - if c.AuthToken != AUTH_TOKEN { - t.Error("Bad auth token") - } - if !c.Authenticated() { - t.Error("Didn't authenticate") - } -} - -func TestInternalAuthenticateDenied(t *testing.T) { - server.AddCheck(t).Error(400, "Bad request") - server.AddCheck(t).Error(401, "DENIED") - defer server.Finished() - c.UnAuthenticate() - err := c.Authenticate() - if err != AuthorizationFailed { - t.Fatal("Expecting AuthorizationFailed", err) - } - // FIXME - // if c.Authenticated() { - // t.Fatal("Expecting not authenticated") - // } -} - -func TestInternalAuthenticateBad(t *testing.T) { - server.AddCheck(t).Out(Headers{ - "X-Storage-Url": PROXY_URL, - }) - defer server.Finished() - err := c.Authenticate() - checkError(t, err, 0, "Response didn't have storage url and auth token") - if c.Authenticated() { - t.Fatal("Expecting not 
authenticated") - } - - server.AddCheck(t).Out(Headers{ - "X-Auth-Token": AUTH_TOKEN, - }) - err = c.Authenticate() - checkError(t, err, 0, "Response didn't have storage url and auth token") - if c.Authenticated() { - t.Fatal("Expecting not authenticated") - } - - server.AddCheck(t) - err = c.Authenticate() - checkError(t, err, 0, "Response didn't have storage url and auth token") - if c.Authenticated() { - t.Fatal("Expecting not authenticated") - } - - server.AddCheck(t).Out(Headers{ - "X-Storage-Url": PROXY_URL, - "X-Auth-Token": AUTH_TOKEN, - }) - err = c.Authenticate() - if err != nil { - t.Fatal(err) - } - if !c.Authenticated() { - t.Fatal("Expecting authenticated") - } -} - -func testContainerNames(t *testing.T, rx string, expected []string) { - server.AddCheck(t).In(Headers{ - "User-Agent": DefaultUserAgent, - "X-Auth-Token": AUTH_TOKEN, - }).Tx(rx).Url("/proxy") - containers, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - if len(containers) != len(expected) { - t.Fatal("Wrong number of containers", len(containers), rx, len(expected), expected) - } - for i := range containers { - if containers[i] != expected[i] { - t.Error("Bad container", containers[i], expected[i]) - } - } -} -func TestInternalContainerNames(t *testing.T) { - defer server.Finished() - testContainerNames(t, "", []string{}) - testContainerNames(t, "one", []string{"one"}) - testContainerNames(t, "one\n", []string{"one"}) - testContainerNames(t, "one\ntwo\nthree\n", []string{"one", "two", "three"}) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go deleted file mode 100644 index 57f7d9e8..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go +++ /dev/null @@ -1,1472 +0,0 @@ -// This tests the swift packagae -// -// It can be used with a real swift server which should be set up in -// the environment variables SWIFT_API_USER, SWIFT_API_KEY and -// SWIFT_AUTH_URL -// In case those variables are not defined, a fake Swift server -// is used instead - see Testing in README.md for more info -// -// The functions are designed to run in order and create things the -// next function tests. This means that if it goes wrong it is likely -// errors will propagate. You may need to tidy up the CONTAINER to -// get it to run cleanly. 
-package swift_test - -import ( - "archive/tar" - "bytes" - "crypto/md5" - "crypto/tls" - "encoding/json" - "encoding/xml" - "fmt" - "github.com/ncw/swift" - "github.com/ncw/swift/swifttest" - "io" - "net/http" - "os" - "strconv" - "strings" - "sync" - "testing" - "time" -) - -var ( - c *swift.Connection - srv *swifttest.SwiftServer - m1 = swift.Metadata{"Hello": "1", "potato-Salad": "2"} - m2 = swift.Metadata{"hello": "", "potato-salad": ""} - skipVersionTests = false -) - -const ( - CONTAINER = "GoSwiftUnitTest" - VERSIONS_CONTAINER = "GoSwiftUnitTestVersions" - CURRENT_CONTAINER = "GoSwiftUnitTestCurrent" - OBJECT = "test_object" - OBJECT2 = "test_object2" - EMPTYOBJECT = "empty_test_object" - CONTENTS = "12345" - CONTENTS2 = "54321" - CONTENT_SIZE = int64(len(CONTENTS)) - CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b" - EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e" -) - -type someTransport struct{ http.Transport } - -func makeConnection() (*swift.Connection, error) { - var err error - - UserName := os.Getenv("SWIFT_API_USER") - ApiKey := os.Getenv("SWIFT_API_KEY") - AuthUrl := os.Getenv("SWIFT_AUTH_URL") - - Insecure := os.Getenv("SWIFT_AUTH_INSECURE") - ConnectionChannelTimeout := os.Getenv("SWIFT_CONNECTION_CHANNEL_TIMEOUT") - DataChannelTimeout := os.Getenv("SWIFT_DATA_CHANNEL_TIMEOUT") - - if UserName == "" || ApiKey == "" || AuthUrl == "" { - if srv != nil { - srv.Close() - } - srv, err = swifttest.NewSwiftServer("localhost") - if err != nil { - return nil, err - } - - UserName = "swifttest" - ApiKey = "swifttest" - AuthUrl = srv.AuthURL - } - - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - MaxIdleConnsPerHost: 2048, - } - if Insecure == "1" { - transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - } - - c := swift.Connection{ - UserName: UserName, - ApiKey: ApiKey, - AuthUrl: AuthUrl, - Transport: transport, - ConnectTimeout: 60 * time.Second, - Timeout: 60 * time.Second, - } - - var timeout int64 - if ConnectionChannelTimeout != "" { - timeout, err = strconv.ParseInt(ConnectionChannelTimeout, 10, 32) - if err == nil { - c.ConnectTimeout = time.Duration(timeout) * time.Second - } - } - - if DataChannelTimeout != "" { - timeout, err = strconv.ParseInt(DataChannelTimeout, 10, 32) - if err == nil { - c.Timeout = time.Duration(timeout) * time.Second - } - } - - return &c, nil -} - -func isV3Api() bool { - AuthUrl := os.Getenv("SWIFT_AUTH_URL") - return strings.Contains(AuthUrl, "v3") -} - -func TestTransport(t *testing.T) { - var err error - - c, err = makeConnection() - if err != nil { - t.Fatal("Failed to create server", err) - } - - tr := &someTransport{ - Transport: http.Transport{ - MaxIdleConnsPerHost: 2048, - }, - } - - Insecure := os.Getenv("SWIFT_AUTH_INSECURE") - - if Insecure == "1" { - tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - } - - c.Transport = tr - - if isV3Api() { - c.Tenant = os.Getenv("SWIFT_TENANT") - c.Domain = os.Getenv("SWIFT_API_DOMAIN") - } else { - c.Tenant = os.Getenv("SWIFT_TENANT") - c.TenantId = os.Getenv("SWIFT_TENANT_ID") - } - - err = c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } - if srv != nil { - srv.Close() - } -} - -// The following Test functions are run in order - this one must come before the others! 
-func TestV1V2Authenticate(t *testing.T) { - var err error - - if isV3Api() { - return - } - - c, err = makeConnection() - if err != nil { - t.Fatal("Failed to create server", err) - } - - c.Tenant = os.Getenv("SWIFT_TENANT") - c.TenantId = os.Getenv("SWIFT_TENANT_ID") - - err = c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -func TestV3AuthenticateWithDomainNameAndTenantId(t *testing.T) { - var err error - if !isV3Api() { - return - } - - c, err = makeConnection() - if err != nil { - t.Fatal("Failed to create server", err) - } - - c.TenantId = os.Getenv("SWIFT_TENANT_ID") - c.Domain = os.Getenv("SWIFT_API_DOMAIN") - - err = c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -func TestV3AuthenticateWithDomainIdAndTenantId(t *testing.T) { - var err error - - if !isV3Api() { - return - } - - c, err = makeConnection() - if err != nil { - t.Fatal("Failed to create server", err) - } - - c.TenantId = os.Getenv("SWIFT_TENANT_ID") - c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID") - - err = c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -func TestV3AuthenticateWithDomainNameAndTenantName(t *testing.T) { - var err error - - if !isV3Api() { - return - } - - c, err = makeConnection() - if err != nil { - t.Fatal("Failed to create server", err) - } - - c.Tenant = os.Getenv("SWIFT_TENANT") - c.Domain = os.Getenv("SWIFT_API_DOMAIN") - - err = c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -func TestV3AuthenticateWithDomainIdAndTenantName(t *testing.T) { - var err error - - if !isV3Api() { - return - } - - c, err = makeConnection() - if err != nil { - t.Fatal("Failed to create server", err) - } - - c.Tenant = os.Getenv("SWIFT_TENANT") - c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID") - - err = c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} - -// Attempt to trigger a race in authenticate -// -// Run with -race to test -func TestAuthenticateRace(t *testing.T) { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - err := c.Authenticate() - if err != nil { - t.Fatal("Auth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } - }() - } - wg.Wait() -} - -// Test a connection can be serialized and unserialized with JSON -func TestSerializeConnectionJson(t *testing.T) { - serializedConnection, err := json.Marshal(c) - if err != nil { - t.Fatalf("Failed to serialize connection: %v", err) - } - c2 := new(swift.Connection) - err = json.Unmarshal(serializedConnection, &c2) - if err != nil { - t.Fatalf("Failed to unserialize connection: %v", err) - } - if !c2.Authenticated() { - t.Fatal("Should be authenticated") - } - _, _, err = c2.Account() - if err != nil { - t.Fatalf("Failed to use unserialized connection: %v", err) - } -} - -// Test a connection can be serialized and unserialized with XML -func TestSerializeConnectionXml(t *testing.T) { - serializedConnection, err := xml.Marshal(c) - if err != nil { - t.Fatalf("Failed to serialize connection: %v", err) - } - c2 := new(swift.Connection) - err = xml.Unmarshal(serializedConnection, &c2) - if err != nil { - t.Fatalf("Failed to unserialize connection: %v", err) - } - if 
!c2.Authenticated() { - t.Fatal("Should be authenticated") - } - _, _, err = c2.Account() - if err != nil { - t.Fatalf("Failed to use unserialized connection: %v", err) - } -} - -// Test the reauthentication logic -func TestOnReAuth(t *testing.T) { - c2 := c - c2.UnAuthenticate() - _, _, err := c2.Account() - if err != nil { - t.Fatalf("Failed to reauthenticate: %v", err) - } -} -func TestAccount(t *testing.T) { - info, headers, err := c.Account() - if err != nil { - t.Fatal(err) - } - if headers["X-Account-Container-Count"] != fmt.Sprintf("%d", info.Containers) { - t.Error("Bad container count") - } - if headers["X-Account-Bytes-Used"] != fmt.Sprintf("%d", info.BytesUsed) { - t.Error("Bad bytes count") - } - if headers["X-Account-Object-Count"] != fmt.Sprintf("%d", info.Objects) { - t.Error("Bad objects count") - } - //fmt.Println(info) - //fmt.Println(headers) -} - -func compareMaps(t *testing.T, a, b map[string]string) { - if len(a) != len(b) { - t.Error("Maps different sizes", a, b) - } - for ka, va := range a { - if vb, ok := b[ka]; !ok || va != vb { - t.Error("Difference in key", ka, va, b[ka]) - } - } - for kb, vb := range b { - if va, ok := a[kb]; !ok || vb != va { - t.Error("Difference in key", kb, vb, a[kb]) - } - } -} - -func TestAccountUpdate(t *testing.T) { - err := c.AccountUpdate(m1.AccountHeaders()) - if err != nil { - t.Fatal(err) - } - - _, headers, err := c.Account() - if err != nil { - t.Fatal(err) - } - m := headers.AccountMetadata() - delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set - compareMaps(t, m, map[string]string{"hello": "1", "potato-salad": "2"}) - - err = c.AccountUpdate(m2.AccountHeaders()) - if err != nil { - t.Fatal(err) - } - - _, headers, err = c.Account() - if err != nil { - t.Fatal(err) - } - m = headers.AccountMetadata() - delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set - compareMaps(t, m, map[string]string{}) - - //fmt.Println(c.Account()) - //fmt.Println(headers) - //fmt.Println(headers.AccountMetadata()) - //fmt.Println(c.AccountUpdate(m2.AccountHeaders())) - //fmt.Println(c.Account()) -} - -func TestContainerCreate(t *testing.T) { - err := c.ContainerCreate(CONTAINER, m1.ContainerHeaders()) - if err != nil { - t.Fatal(err) - } -} - -func TestContainer(t *testing.T) { - info, headers, err := c.Container(CONTAINER) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ContainerMetadata(), map[string]string{"hello": "1", "potato-salad": "2"}) - if CONTAINER != info.Name { - t.Error("Bad container count") - } - if headers["X-Container-Bytes-Used"] != fmt.Sprintf("%d", info.Bytes) { - t.Error("Bad bytes count") - } - if headers["X-Container-Object-Count"] != fmt.Sprintf("%d", info.Count) { - t.Error("Bad objects count") - } - //fmt.Println(info) - //fmt.Println(headers) -} - -func TestContainersAll(t *testing.T) { - containers1, err := c.ContainersAll(nil) - if err != nil { - t.Fatal(err) - } - containers2, err := c.Containers(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestContainersAllWithLimit(t *testing.T) { - containers1, err := c.ContainersAll(&swift.ContainersOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - containers2, err := c.Containers(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if 
containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestContainerUpdate(t *testing.T) { - err := c.ContainerUpdate(CONTAINER, m2.ContainerHeaders()) - if err != nil { - t.Fatal(err) - } - _, headers, err := c.Container(CONTAINER) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ContainerMetadata(), map[string]string{}) - //fmt.Println(headers) -} - -func TestContainerNames(t *testing.T) { - containers, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - // fmt.Printf("container %q\n", CONTAINER) - ok := false - for _, container := range containers { - if container == CONTAINER { - ok = true - break - } - } - if !ok { - t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers) - } - // fmt.Println(containers) -} - -func TestContainerNamesAll(t *testing.T) { - containers1, err := c.ContainerNamesAll(nil) - if err != nil { - t.Fatal(err) - } - containers2, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestContainerNamesAllWithLimit(t *testing.T) { - containers1, err := c.ContainerNamesAll(&swift.ContainersOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - containers2, err := c.ContainerNames(nil) - if err != nil { - t.Fatal(err) - } - if len(containers1) != len(containers2) { - t.Fatal("Wrong length") - } - for i := range containers1 { - if containers1[i] != containers2[i] { - t.Fatal("Not the same") - } - } -} - -func TestObjectPutString(t *testing.T) { - err := c.ObjectPutString(CONTAINER, OBJECT, CONTENTS, "") - if err != nil { - t.Fatal(err) - } - - info, _, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Error(err) - } - if info.ContentType != "application/octet-stream" { - t.Error("Bad content type", info.ContentType) - } - if info.Bytes != CONTENT_SIZE { - t.Error("Bad length") - } - if info.Hash != CONTENT_MD5 { - t.Error("Bad hash") - } -} - -func TestObjectEmpty(t *testing.T) { - err := c.ObjectPutString(CONTAINER, EMPTYOBJECT, "", "") - if err != nil { - t.Fatal(err) - } - - info, _, err := c.Object(CONTAINER, EMPTYOBJECT) - if err != nil { - t.Error(err) - } - if info.ContentType != "application/octet-stream" { - t.Error("Bad content type", info.ContentType) - } - if info.Bytes != 0 { - t.Errorf("Bad length want 0 got %v", info.Bytes) - } - if info.Hash != EMPTY_MD5 { - t.Errorf("Bad MD5 want %v got %v", EMPTY_MD5, info.Hash) - } - - // Tidy up - err = c.ObjectDelete(CONTAINER, EMPTYOBJECT) - if err != nil { - t.Error(err) - } -} - -func TestObjectPutBytes(t *testing.T) { - err := c.ObjectPutBytes(CONTAINER, OBJECT, []byte(CONTENTS), "") - if err != nil { - t.Fatal(err) - } - - info, _, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Error(err) - } - if info.ContentType != "application/octet-stream" { - t.Error("Bad content type", info.ContentType) - } - if info.Bytes != CONTENT_SIZE { - t.Error("Bad length") - } - if info.Hash != CONTENT_MD5 { - t.Error("Bad hash") - } -} - -func TestObjectPutMimeType(t *testing.T) { - err := c.ObjectPutString(CONTAINER, "test.jpg", CONTENTS, "") - if err != nil { - t.Fatal(err) - } - - info, _, err := c.Object(CONTAINER, "test.jpg") - if err != nil { - t.Error(err) - } - if info.ContentType != "image/jpeg" { - t.Error("Bad content type", info.ContentType) - } - - // Tidy up - err = c.ObjectDelete(CONTAINER, "test.jpg") - if err != nil { -
t.Error(err) - } -} - -func TestObjectCreate(t *testing.T) { - out, err := c.ObjectCreate(CONTAINER, OBJECT2, true, "", "", nil) - if err != nil { - t.Fatal(err) - } - buf := &bytes.Buffer{} - hash := md5.New() - out2 := io.MultiWriter(out, buf, hash) - for i := 0; i < 100; i++ { - fmt.Fprintf(out2, "%d %s\n", i, CONTENTS) - } - err = out.Close() - if err != nil { - t.Error(err) - } - expected := buf.String() - contents, err := c.ObjectGetString(CONTAINER, OBJECT2) - if err != nil { - t.Error(err) - } - if contents != expected { - t.Error("Contents wrong") - } - - // Test writing on closed file - n, err := out.Write([]byte{0}) - if err == nil || n != 0 { - t.Error("Expecting error and n == 0 writing on closed file", err, n) - } - - // Now with hash instead - out, err = c.ObjectCreate(CONTAINER, OBJECT2, false, fmt.Sprintf("%x", hash.Sum(nil)), "", nil) - if err != nil { - t.Fatal(err) - } - _, err = out.Write(buf.Bytes()) - if err != nil { - t.Error(err) - } - err = out.Close() - if err != nil { - t.Error(err) - } - contents, err = c.ObjectGetString(CONTAINER, OBJECT2) - if err != nil { - t.Error(err) - } - if contents != expected { - t.Error("Contents wrong") - } - - // Now with bad hash - out, err = c.ObjectCreate(CONTAINER, OBJECT2, false, CONTENT_MD5, "", nil) - if err != nil { - t.Fatal(err) - } - // FIXME: work around bug which produces 503 not 422 for empty corrupted files - fmt.Fprintf(out, "Sausage") - err = out.Close() - if err != swift.ObjectCorrupted { - t.Error("Expecting object corrupted not", err) - } - - // Tidy up - err = c.ObjectDelete(CONTAINER, OBJECT2) - if err != nil { - t.Error(err) - } -} - -func TestObjectGetString(t *testing.T) { - contents, err := c.ObjectGetString(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - if contents != CONTENTS { - t.Error("Contents wrong") - } - //fmt.Println(contents) -} - -func TestObjectGetBytes(t *testing.T) { - contents, err := c.ObjectGetBytes(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - if string(contents) != CONTENTS { - t.Error("Contents wrong") - } - //fmt.Println(contents) -} - -func TestObjectOpen(t *testing.T) { - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - n, err := io.Copy(&buf, file) - if err != nil { - t.Fatal(err) - } - if n != CONTENT_SIZE { - t.Fatal("Wrong length", n, CONTENT_SIZE) - } - if buf.String() != CONTENTS { - t.Error("Contents wrong") - } - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectOpenPartial(t *testing.T) { - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - n, err := io.CopyN(&buf, file, 1) - if err != nil { - t.Fatal(err) - } - if n != 1 { - t.Fatal("Wrong length", n, CONTENT_SIZE) - } - if buf.String() != CONTENTS[:1] { - t.Error("Contents wrong") - } - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectOpenLength(t *testing.T) { - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - // FIXME ideally this would check both branches of the Length() code - n, err := file.Length() - if err != nil { - t.Fatal(err) - } - if n != CONTENT_SIZE { - t.Fatal("Wrong length", n, CONTENT_SIZE) - } - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectOpenSeek(t *testing.T) { - - plan := []struct { - whence int - offset int64 - result int64 - }{ - {-1, 0, 0}, - {-1, 0, 1}, - {-1, 0, 2}, - {0, 0, 0}, - {0, 0, 0}, - {0, 1, 
1}, - {0, 2, 2}, - {1, 0, 3}, - {1, -2, 2}, - {1, 1, 4}, - {2, -1, 4}, - {2, -3, 2}, - {2, -2, 3}, - {2, -5, 0}, - {2, -4, 1}, - } - - file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) - if err != nil { - t.Fatal(err) - } - - for _, p := range plan { - if p.whence >= 0 { - result, err := file.Seek(p.offset, p.whence) - if err != nil { - t.Fatal(err, p) - } - if result != p.result { - t.Fatal("Seek result was", result, "expecting", p.result, p) - } - - } - var buf bytes.Buffer - n, err := io.CopyN(&buf, file, 1) - if err != nil { - t.Fatal(err, p) - } - if n != 1 { - t.Fatal("Wrong length", n, p) - } - actual := buf.String() - expected := CONTENTS[p.result : p.result+1] - if actual != expected { - t.Error("Contents wrong, expecting", expected, "got", actual, p) - } - } - - err = file.Close() - if err != nil { - t.Fatal(err) - } -} - -func TestObjectUpdate(t *testing.T) { - err := c.ObjectUpdate(CONTAINER, OBJECT, m1.ObjectHeaders()) - if err != nil { - t.Fatal(err) - } -} - -func checkTime(t *testing.T, when time.Time, low, high int) { - dt := time.Now().Sub(when) - if dt < time.Duration(low)*time.Second || dt > time.Duration(high)*time.Second { - t.Errorf("Time is wrong: dt=%q, when=%q", dt, when) - } -} - -func TestObject(t *testing.T) { - object, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "1", "potato-salad": "2"}) - if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) -} - -func TestObjectUpdate2(t *testing.T) { - err := c.ObjectUpdate(CONTAINER, OBJECT, m2.ObjectHeaders()) - if err != nil { - t.Fatal(err) - } - _, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - //fmt.Println(headers, headers.ObjectMetadata()) - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) -} - -func TestContainers(t *testing.T) { - containers, err := c.Containers(nil) - if err != nil { - t.Fatal(err) - } - ok := false - for _, container := range containers { - if container.Name == CONTAINER { - ok = true - // Container may or may not have the file contents in it - // Swift updates may be behind - if container.Count == 0 && container.Bytes == 0 { - break - } - if container.Count == 1 && container.Bytes == CONTENT_SIZE { - break - } - t.Errorf("Bad size of Container %q: %q", CONTAINER, container) - break - } - } - if !ok { - t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers) - } - //fmt.Println(containers) -} - -func TestObjectNames(t *testing.T) { - objects, err := c.ObjectNames(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectNamesAll(t *testing.T) { - objects, err := c.ObjectNamesAll(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectNamesAllWithLimit(t *testing.T) { - objects, err := c.ObjectNamesAll(CONTAINER, &swift.ObjectsOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) 
-} - -func TestObjectsWalk(t *testing.T) { - objects := make([]string, 0) - err := c.ObjectsWalk(CONTAINER, nil, func(opts *swift.ObjectsOpts) (interface{}, error) { - newObjects, err := c.ObjectNames(CONTAINER, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjects(t *testing.T) { - objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 { - t.Fatal("Should only be 1 object") - } - object := objects[0] - if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) - // fmt.Println(objects) -} - -func TestObjectsDirectory(t *testing.T) { - err := c.ObjectPutString(CONTAINER, "directory", "", "application/directory") - if err != nil { - t.Fatal(err) - } - defer c.ObjectDelete(CONTAINER, "directory") - - // Look for the directory object and check we aren't confusing - // it with a pseudo directory object - objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 2 { - t.Fatal("Should only be 2 objects") - } - found := false - for i := range objects { - object := objects[i] - if object.Name == "directory" { - found = true - if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "d41d8cd98f00b204e9800998ecf8427e" || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) - } - } - if !found { - t.Error("Didn't find directory object") - } - // fmt.Println(objects) -} - -func TestObjectsPseudoDirectory(t *testing.T) { - err := c.ObjectPutString(CONTAINER, "directory/puppy.jpg", "cute puppy", "") - if err != nil { - t.Fatal(err) - } - defer c.ObjectDelete(CONTAINER, "directory/puppy.jpg") - - // Look for the pseudo directory - objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 2 { - t.Fatal("Should only be 2 objects", objects) - } - found := false - for i := range objects { - object := objects[i] - if object.Name == "directory/" { - found = true - if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "" || object.PseudoDirectory != true || object.SubDir != "directory/" && object.LastModified.IsZero() { - t.Error("Bad object info", object) - } - } - } - if !found { - t.Error("Didn't find directory object", objects) - } - - // Look in the pseudo directory now - objects, err = c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "directory/"}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 { - t.Fatal("Should only be 1 object", objects) - } - object := objects[0] - if object.Name != "directory/puppy.jpg" || object.Bytes != 10 || object.ContentType != "image/jpeg" || object.Hash != "87a12ea22fca7f54f0cefef1da535489" || object.PseudoDirectory != false || object.SubDir != "" { - t.Error("Bad object info", object) - } - checkTime(t, object.LastModified, -10, 10) - // fmt.Println(objects) -} - -func TestObjectsAll(t *testing.T) { - objects, err :=
c.ObjectsAll(CONTAINER, nil) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0].Name != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectsAllWithLimit(t *testing.T) { - objects, err := c.ObjectsAll(CONTAINER, &swift.ObjectsOpts{Limit: 1}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0].Name != OBJECT { - t.Error("Incorrect listing", objects) - } - //fmt.Println(objects) -} - -func TestObjectNamesWithPath(t *testing.T) { - objects, err := c.ObjectNames(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: ""}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 1 || objects[0] != OBJECT { - t.Error("Bad listing with path", objects) - } - // fmt.Println(objects) - objects, err = c.ObjectNames(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "Downloads/"}) - if err != nil { - t.Fatal(err) - } - if len(objects) != 0 { - t.Error("Bad listing with path", objects) - } - // fmt.Println(objects) -} - -func TestObjectCopy(t *testing.T) { - _, err := c.ObjectCopy(CONTAINER, OBJECT, CONTAINER, OBJECT2, nil) - if err != nil { - t.Fatal(err) - } - err = c.ObjectDelete(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } -} - -func TestObjectCopyWithMetadata(t *testing.T) { - m := swift.Metadata{} - m["copy-special-metadata"] = "hello" - m["hello"] = "3" - h := m.ObjectHeaders() - h["Content-Type"] = "image/jpeg" - _, err := c.ObjectCopy(CONTAINER, OBJECT, CONTAINER, OBJECT2, h) - if err != nil { - t.Fatal(err) - } - // Re-read the metadata to see if it is correct - _, headers, err := c.Object(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } - if headers["Content-Type"] != "image/jpeg" { - t.Error("Didn't change content type") - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "3", "potato-salad": "", "copy-special-metadata": "hello"}) - err = c.ObjectDelete(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } -} - -func TestObjectMove(t *testing.T) { - err := c.ObjectMove(CONTAINER, OBJECT, CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } - testExistenceAfterDelete(t, CONTAINER, OBJECT) - _, _, err = c.Object(CONTAINER, OBJECT2) - if err != nil { - t.Fatal(err) - } - - err = c.ObjectMove(CONTAINER, OBJECT2, CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - testExistenceAfterDelete(t, CONTAINER, OBJECT2) - _, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) -} - -func TestObjectUpdateContentType(t *testing.T) { - err := c.ObjectUpdateContentType(CONTAINER, OBJECT, "text/potato") - if err != nil { - t.Fatal(err) - } - // Re-read the metadata to see if it is correct - _, headers, err := c.Object(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - if headers["Content-Type"] != "text/potato" { - t.Error("Didn't change content type") - } - compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) -} - -func TestVersionContainerCreate(t *testing.T) { - if err := c.VersionContainerCreate(CURRENT_CONTAINER, VERSIONS_CONTAINER); err != nil { - if err == swift.Forbidden { - t.Log("Server doesn't support Versions - skipping test") - skipVersionTests = true - return - } - t.Fatal(err) - } -} - -func TestVersionObjectAdd(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - return - } - // Version 1 - if err := c.ObjectPutString(CURRENT_CONTAINER, 
OBJECT, CONTENTS, ""); err != nil { - t.Fatal(err) - } - if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } else if contents != CONTENTS { - t.Error("Contents wrong") - } - - // Version 2 - if err := c.ObjectPutString(CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil { - t.Fatal(err) - } - if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } else if contents != CONTENTS2 { - t.Error("Contents wrong") - } - - // Version 3 - if err := c.ObjectPutString(CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil { - t.Fatal(err) - } -} - -func TestVersionObjectList(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - return - } - list, err := c.VersionObjectList(VERSIONS_CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - - if len(list) != 2 { - t.Error("Version list should return 2 objects") - } - - //fmt.Print(list) -} - -func TestVersionObjectDelete(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - return - } - // Delete Version 3 - if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } - - // Delete Version 2 - if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } - - // Contents should be reverted to Version 1 - if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } else if contents != CONTENTS { - t.Error("Contents wrong") - } -} - -// cleanUpContainer deletes everything in the container and then the -// container. It expects the container to be empty and if it wasn't -// it logs an error. -func cleanUpContainer(t *testing.T, container string) { - objects, err := c.Objects(container, nil) - if err != nil { - t.Error(err, container) - } else { - if len(objects) != 0 { - t.Error("Container not empty", container) - } - for _, object := range objects { - t.Log("Deleting spurious", object.Name) - err = c.ObjectDelete(container, object.Name) - if err != nil { - t.Error(err, container) - } - } - } - - if err := c.ContainerDelete(container); err != nil { - t.Error(err, container) - } -} - -func TestVersionDeleteContent(t *testing.T) { - if skipVersionTests { - t.Log("Server doesn't support Versions - skipping test") - } else { - // Delete Version 1 - if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { - t.Fatal(err) - } - } - cleanUpContainer(t, VERSIONS_CONTAINER) - cleanUpContainer(t, CURRENT_CONTAINER) -} - -// Check for non existence after delete -// May have to do it a few times to wait for swift to be consistent. 
-func testExistenceAfterDelete(t *testing.T, container, object string) { - for i := 10; i >= 0; i-- { - _, _, err := c.Object(container, object) - if err == swift.ObjectNotFound { - break - } - if i == 0 { - t.Fatalf("Expecting object %q/%q not found, got err=%v", container, object, err) - } - time.Sleep(1 * time.Second) - } -} - -func TestObjectDelete(t *testing.T) { - err := c.ObjectDelete(CONTAINER, OBJECT) - if err != nil { - t.Fatal(err) - } - testExistenceAfterDelete(t, CONTAINER, OBJECT) - err = c.ObjectDelete(CONTAINER, OBJECT) - if err != swift.ObjectNotFound { - t.Fatal("Expecting Object not found", err) - } -} - -func TestBulkDelete(t *testing.T) { - result, err := c.BulkDelete(CONTAINER, []string{OBJECT}) - if err == swift.Forbidden { - t.Log("Server doesn't support BulkDelete - skipping test") - return - } - if err != nil { - t.Fatal(err) - } - if result.NumberNotFound != 1 { - t.Error("Expected 1, actual:", result.NumberNotFound) - } - if result.NumberDeleted != 0 { - t.Error("Expected 0, actual:", result.NumberDeleted) - } - err = c.ObjectPutString(CONTAINER, OBJECT, CONTENTS, "") - if err != nil { - t.Fatal(err) - } - result, err = c.BulkDelete(CONTAINER, []string{OBJECT2, OBJECT}) - if err != nil { - t.Fatal(err) - } - if result.NumberNotFound != 1 { - t.Error("Expected 1, actual:", result.NumberNotFound) - } - if result.NumberDeleted != 1 { - t.Error("Expected 1, actual:", result.NumberDeleted) - } - t.Log("Errors:", result.Errors) -} - -func TestBulkUpload(t *testing.T) { - buffer := new(bytes.Buffer) - ds := tar.NewWriter(buffer) - var files = []struct{ Name, Body string }{ - {OBJECT, CONTENTS}, - {OBJECT2, CONTENTS2}, - } - for _, file := range files { - hdr := &tar.Header{ - Name: file.Name, - Size: int64(len(file.Body)), - } - if err := ds.WriteHeader(hdr); err != nil { - t.Fatal(err) - } - if _, err := ds.Write([]byte(file.Body)); err != nil { - t.Fatal(err) - } - } - if err := ds.Close(); err != nil { - t.Fatal(err) - } - - result, err := c.BulkUpload(CONTAINER, buffer, swift.UploadTar, nil) - if err == swift.Forbidden { - t.Log("Server doesn't support BulkUpload - skipping test") - return - } - if err != nil { - t.Fatal(err) - } - if result.NumberCreated != 2 { - t.Error("Expected 2, actual:", result.NumberCreated) - } - t.Log("Errors:", result.Errors) - - _, _, err = c.Object(CONTAINER, OBJECT) - if err != nil { - t.Error("Expecting object to be found") - } - _, _, err = c.Object(CONTAINER, OBJECT2) - if err != nil { - t.Error("Expecting object to be found") - } - c.ObjectDelete(CONTAINER, OBJECT) - c.ObjectDelete(CONTAINER, OBJECT2) -} - -func TestObjectDifficultName(t *testing.T) { - const name = `hello? 
sausage/êé/Hello, 世界/ " ' @ < > & ?/` - err := c.ObjectPutString(CONTAINER, name, CONTENTS, "") - if err != nil { - t.Fatal(err) - } - objects, err := c.ObjectNamesAll(CONTAINER, nil) - if err != nil { - t.Error(err) - } - found := false - for _, object := range objects { - if object == name { - found = true - break - } - } - if !found { - t.Errorf("Couldn't find %q in listing %q", name, objects) - } - err = c.ObjectDelete(CONTAINER, name) - if err != nil { - t.Fatal(err) - } -} - -func TestContainerDelete(t *testing.T) { - err := c.ContainerDelete(CONTAINER) - if err != nil { - t.Fatal(err) - } - err = c.ContainerDelete(CONTAINER) - if err != swift.ContainerNotFound { - t.Fatal("Expecting container not found", err) - } - _, _, err = c.Container(CONTAINER) - if err != swift.ContainerNotFound { - t.Fatal("Expecting container not found", err) - } -} - -func TestUnAuthenticate(t *testing.T) { - c.UnAuthenticate() - if c.Authenticated() { - t.Fatal("Shouldn't be authenticated") - } - // Test re-authenticate - err := c.Authenticate() - if err != nil { - t.Fatal("ReAuth failed", err) - } - if !c.Authenticated() { - t.Fatal("Not authenticated") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swifttest/server.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swifttest/server.go deleted file mode 100644 index 78c07da4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/swifttest/server.go +++ /dev/null @@ -1,885 +0,0 @@ -// This implements a very basic Swift server -// Everything is stored in memory -// -// This comes from https://github.com/mitchellh/goamz -// and was adapted for Swift -// -package swifttest - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net" - "net/http" - "net/url" - "path" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/ncw/swift" -) - -const ( - DEBUG = false -) - -type SwiftServer struct { - t *testing.T - reqId int - mu sync.Mutex - Listener net.Listener - AuthURL string - URL string - Containers map[string]*container - Accounts map[string]*account - Sessions map[string]*session -} - -// The Folder type represents a container stored in an account -type Folder struct { - Count int `json:"count"` - Bytes int `json:"bytes"` - Name string `json:"name"` -} - -// The Key type represents an item stored in a container. -type Key struct { - Key string `json:"name"` - LastModified string `json:"last_modified"` - Size int64 `json:"bytes"` - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string `json:"hash"` - ContentType string `json:"content_type"` - // Owner Owner -} - -type Subdir struct { - Subdir string `json:"subdir"` -} - -type swiftError struct { - statusCode int - Code string - Message string -} - -type action struct { - srv *SwiftServer - w http.ResponseWriter - req *http.Request - reqId string - user *account -} - -type session struct { - username string -} - -type metadata struct { - meta http.Header // metadata to return with requests. -} - -type account struct { - swift.Account - metadata - password string -} - -type object struct { - metadata - name string - mtime time.Time - checksum []byte // also held as ETag in meta. 
- data []byte - content_type string -} - -type container struct { - metadata - name string - ctime time.Time - objects map[string]*object - bytes int -} - -// A resource encapsulates the subject of an HTTP request. -// The resource referred to may or may not exist -// when the request is made. -type resource interface { - put(a *action) interface{} - get(a *action) interface{} - post(a *action) interface{} - delete(a *action) interface{} - copy(a *action) interface{} -} - -type objectResource struct { - name string - version string - container *container // always non-nil. - object *object // may be nil. -} - -type containerResource struct { - name string - container *container // non-nil if the container already exists. -} - -var responseParams = map[string]bool{ - "content-type": true, - "content-language": true, - "expires": true, - "cache-control": true, - "content-disposition": true, - "content-encoding": true, -} - -func fatalf(code int, codeStr string, errf string, a ...interface{}) { - panic(&swiftError{ - statusCode: code, - Code: codeStr, - Message: fmt.Sprintf(errf, a...), - }) -} - -func (m metadata) setMetadata(a *action, resource string) { - for key, values := range a.req.Header { - key = http.CanonicalHeaderKey(key) - if metaHeaders[key] || strings.HasPrefix(key, "X-"+strings.Title(resource)+"-Meta-") { - if values[0] != "" || resource == "object" { - m.meta[key] = values - } else { - m.meta.Del(key) - } - } - } -} - -func (m metadata) getMetadata(a *action) { - h := a.w.Header() - for name, d := range m.meta { - h[name] = d - } -} - -func (c container) list(delimiter string, marker string, prefix string, parent string) (resp []interface{}) { - var tmp orderedObjects - - // first get all matching objects and arrange them in alphabetical order. - for _, obj := range c.objects { - if strings.HasPrefix(obj.name, prefix) { - tmp = append(tmp, obj) - } - } - sort.Sort(tmp) - - var prefixes []string - for _, obj := range tmp { - if !strings.HasPrefix(obj.name, prefix) { - continue - } - - isPrefix := false - name := obj.name - if parent != "" { - if path.Dir(obj.name) != path.Clean(parent) { - continue - } - } else if delimiter != "" { - if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { - name = obj.name[:len(prefix)+i+len(delimiter)] - if prefixes != nil && prefixes[len(prefixes)-1] == name { - continue - } - isPrefix = true - } - } - - if name <= marker { - continue - } - - if isPrefix { - prefixes = append(prefixes, name) - - resp = append(resp, Subdir{ - Subdir: name, - }) - } else { - resp = append(resp, obj) - } - } - - return -} - -// GET on a container lists the objects in the container. 
-func (r containerResource) get(a *action) interface{} { - if r.container == nil { - fatalf(404, "NoSuchContainer", "The specified container does not exist") - } - - delimiter := a.req.Form.Get("delimiter") - marker := a.req.Form.Get("marker") - prefix := a.req.Form.Get("prefix") - format := a.req.URL.Query().Get("format") - parent := a.req.Form.Get("path") - - a.w.Header().Set("X-Container-Bytes-Used", strconv.Itoa(r.container.bytes)) - a.w.Header().Set("X-Container-Object-Count", strconv.Itoa(len(r.container.objects))) - r.container.getMetadata(a) - - if a.req.Method == "HEAD" { - return nil - } - - objects := r.container.list(delimiter, marker, prefix, parent) - - if format == "json" { - a.w.Header().Set("Content-Type", "application/json") - var resp []interface{} - for _, item := range objects { - if obj, ok := item.(*object); ok { - resp = append(resp, obj.Key()) - } else { - resp = append(resp, item) - } - } - return resp - } else { - for _, item := range objects { - if obj, ok := item.(*object); ok { - a.w.Write([]byte(obj.name + "\n")) - } else if subdir, ok := item.(Subdir); ok { - a.w.Write([]byte(subdir.Subdir + "\n")) - } - } - return nil - } -} - -// orderedContainers holds a slice of containers that can be sorted -// by name. -type orderedContainers []*container - -func (s orderedContainers) Len() int { - return len(s) -} -func (s orderedContainers) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s orderedContainers) Less(i, j int) bool { - return s[i].name < s[j].name -} - -func (r containerResource) delete(a *action) interface{} { - b := r.container - if b == nil { - fatalf(404, "NoSuchContainer", "The specified container does not exist") - } - if len(b.objects) > 0 { - fatalf(409, "Conflict", "The container you tried to delete is not empty") - } - delete(a.srv.Containers, b.name) - a.user.Containers-- - return nil -} - -func (r containerResource) put(a *action) interface{} { - if a.req.URL.Query().Get("extract-archive") != "" { - fatalf(403, "Operation forbidden", "Bulk upload is not supported") - } - - if r.container == nil { - if !validContainerName(r.name) { - fatalf(400, "InvalidContainerName", "The specified container is not valid") - } - r.container = &container{ - name: r.name, - objects: make(map[string]*object), - metadata: metadata{ - meta: make(http.Header), - }, - } - r.container.setMetadata(a, "container") - a.srv.Containers[r.name] = r.container - a.user.Containers++ - } - - return nil -} - -func (r containerResource) post(a *action) interface{} { - if r.container == nil { - fatalf(400, "Method", "The resource could not be found.") - } else { - r.container.setMetadata(a, "container") - a.w.WriteHeader(201) - jsonMarshal(a.w, Folder{ - Count: len(r.container.objects), - Bytes: r.container.bytes, - Name: r.container.name, - }) - } - return nil -} - -func (containerResource) copy(a *action) interface{} { return notAllowed() } - -// validContainerName returns whether name is a valid container name. -// Here are the rules, from: -// http://docs.openstack.org/api/openstack-object-storage/1.0/content/ch_object-storage-dev-api-storage.html -// -// Container names cannot exceed 256 bytes and cannot contain the / character. -// -func validContainerName(name string) bool { - if len(name) == 0 || len(name) > 256 { - return false - } - for _, r := range name { - switch { - case r == '/': - return false - default: - } - } - return true -} - -// orderedObjects holds a slice of objects that can be sorted -// by name. 
-type orderedObjects []*object - -func (s orderedObjects) Len() int { - return len(s) -} -func (s orderedObjects) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s orderedObjects) Less(i, j int) bool { - return s[i].name < s[j].name -} - -func (obj *object) Key() Key { - return Key{ - Key: obj.name, - LastModified: obj.mtime.Format("2006-01-02T15:04:05"), - Size: int64(len(obj.data)), - ETag: fmt.Sprintf("%x", obj.checksum), - ContentType: obj.content_type, - } -} - -var metaHeaders = map[string]bool{ - "Content-Type": true, - "Content-Encoding": true, - "Content-Disposition": true, - "X-Object-Manifest": true, -} - -var rangeRegexp = regexp.MustCompile("(bytes=)?([0-9]*)-([0-9]*)") - -// GET on an object gets the contents of the object. -func (objr objectResource) get(a *action) interface{} { - var ( - etag []byte - reader io.Reader - start int - end int = -1 - ) - obj := objr.object - if obj == nil { - fatalf(404, "Not Found", "The resource could not be found.") - } - - h := a.w.Header() - // add metadata - obj.getMetadata(a) - - if r := a.req.Header.Get("Range"); r != "" { - m := rangeRegexp.FindStringSubmatch(r) - if m[2] != "" { - start, _ = strconv.Atoi(m[2]) - } - if m[3] != "" { - end, _ = strconv.Atoi(m[3]) - } - } - - max := func(a int, b int) int { - if a > b { - return a - } - return b - } - - if manifest, ok := obj.meta["X-Object-Manifest"]; ok { - var segments []io.Reader - components := strings.SplitN(manifest[0], "/", 2) - segContainer := a.srv.Containers[components[0]] - prefix := components[1] - resp := segContainer.list("", "", prefix, "") - sum := md5.New() - cursor := 0 - size := 0 - for _, item := range resp { - if obj, ok := item.(*object); ok { - length := len(obj.data) - size += length - sum.Write([]byte(components[0] + "/" + obj.name + "\n")) - if start >= cursor+length { - continue - } - segments = append(segments, bytes.NewReader(obj.data[max(0, start-cursor):])) - cursor += length - } - } - etag = sum.Sum(nil) - if end == -1 { - end = size - } - reader = io.LimitReader(io.MultiReader(segments...), int64(end-start)) - } else { - if end == -1 { - end = len(obj.data) - } - etag = obj.checksum - reader = bytes.NewReader(obj.data[start:end]) - } - - h.Set("Content-Length", fmt.Sprint(end-start)) - h.Set("ETag", hex.EncodeToString(etag)) - h.Set("Last-Modified", obj.mtime.Format(http.TimeFormat)) - - if a.req.Method == "HEAD" { - return nil - } - - // TODO avoid holding the lock when writing data. - _, err := io.Copy(a.w, reader) - if err != nil { - // we can't do much except just log the fact. - log.Printf("error writing data: %v", err) - } - return nil -} - -// PUT on an object creates the object. -func (objr objectResource) put(a *action) interface{} { - var expectHash []byte - if c := a.req.Header.Get("ETag"); c != "" { - var err error - expectHash, err = hex.DecodeString(c) - if err != nil || len(expectHash) != md5.Size { - fatalf(400, "InvalidDigest", "The ETag you specified was invalid") - } - } - sum := md5.New() - // TODO avoid holding lock while reading data. 
- data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) - if err != nil { - fatalf(400, "TODO", "read error") - } - gotHash := sum.Sum(nil) - if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { - fatalf(422, "Bad ETag", "The ETag you specified did not match what we received") - } - if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { - fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") - } - - // TODO is this correct, or should we erase all previous metadata? - obj := objr.object - if obj == nil { - obj = &object{ - name: objr.name, - metadata: metadata{ - meta: make(http.Header), - }, - } - a.user.Objects++ - } else { - objr.container.bytes -= len(obj.data) - a.user.BytesUsed -= int64(len(obj.data)) - } - - var content_type string - if content_type = a.req.Header.Get("Content-Type"); content_type == "" { - content_type = mime.TypeByExtension(path.Ext(obj.name)) - if content_type == "" { - content_type = "application/octet-stream" - } - } - - // PUT request has been successful - save data and metadata - obj.setMetadata(a, "object") - obj.content_type = content_type - obj.data = data - obj.checksum = gotHash - obj.mtime = time.Now().UTC() - objr.container.objects[objr.name] = obj - objr.container.bytes += len(data) - a.user.BytesUsed += int64(len(data)) - - h := a.w.Header() - h.Set("ETag", hex.EncodeToString(obj.checksum)) - - return nil -} - -func (objr objectResource) delete(a *action) interface{} { - if objr.object == nil { - fatalf(404, "NoSuchKey", "The specified key does not exist.") - } - - objr.container.bytes -= len(objr.object.data) - a.user.BytesUsed -= int64(len(objr.object.data)) - delete(objr.container.objects, objr.name) - a.user.Objects-- - return nil -} - -func (objr objectResource) post(a *action) interface{} { - obj := objr.object - obj.setMetadata(a, "object") - return nil -} - -func (objr objectResource) copy(a *action) interface{} { - if objr.object == nil { - fatalf(404, "NoSuchKey", "The specified key does not exist.") - } - - obj := objr.object - destination := a.req.Header.Get("Destination") - if destination == "" { - fatalf(400, "Bad Request", "You must provide a Destination header") - } - - var ( - obj2 *object - objr2 objectResource - ) - - destURL, _ := url.Parse("/v1/AUTH_tk/" + destination) - r := a.srv.resourceForURL(destURL) - switch t := r.(type) { - case objectResource: - objr2 = t - if objr2.object == nil { - obj2 = &object{ - name: objr2.name, - metadata: metadata{ - meta: make(http.Header), - }, - } - a.user.Objects++ - } else { - obj2 = objr2.object - objr2.container.bytes -= len(obj2.data) - a.user.BytesUsed -= int64(len(obj2.data)) - } - default: - fatalf(400, "Bad Request", "Destination must point to a valid object path") - } - - obj2.content_type = obj.content_type - obj2.data = obj.data - obj2.checksum = obj.checksum - obj2.mtime = time.Now() - objr2.container.objects[objr2.name] = obj2 - objr2.container.bytes += len(obj.data) - a.user.BytesUsed += int64(len(obj.data)) - - for key, values := range obj.metadata.meta { - obj2.metadata.meta[key] = values - } - obj2.setMetadata(a, "object") - - return nil -} - -func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { - // ignore error from ParseForm as it's usually spurious. 
- req.ParseForm() - - s.mu.Lock() - defer s.mu.Unlock() - - if DEBUG { - log.Printf("swifttest %q %q", req.Method, req.URL) - } - a := &action{ - srv: s, - w: w, - req: req, - reqId: fmt.Sprintf("%09X", s.reqId), - } - s.reqId++ - - var r resource - defer func() { - switch err := recover().(type) { - case *swiftError: - w.Header().Set("Content-Type", `text/plain; charset=utf-8`) - http.Error(w, err.Message, err.statusCode) - case nil: - default: - panic(err) - } - }() - - var resp interface{} - - if req.URL.String() == "/v1.0" { - username := req.Header.Get("x-auth-user") - key := req.Header.Get("x-auth-key") - if acct, ok := s.Accounts[username]; ok { - if acct.password == key { - r := make([]byte, 16) - _, _ = rand.Read(r) - id := fmt.Sprintf("%X", r) - w.Header().Set("X-Storage-Url", s.URL+"/AUTH_"+username) - w.Header().Set("X-Auth-Token", "AUTH_tk"+string(id)) - w.Header().Set("X-Storage-Token", "AUTH_tk"+string(id)) - s.Sessions[id] = &session{ - username: username, - } - return - } - } - panic(notAuthorized()) - } - - key := req.Header.Get("x-auth-token") - session, ok := s.Sessions[key[7:]] - if !ok { - panic(notAuthorized()) - } - - a.user = s.Accounts[session.username] - - r = s.resourceForURL(req.URL) - - switch req.Method { - case "PUT": - resp = r.put(a) - case "GET", "HEAD": - resp = r.get(a) - case "DELETE": - resp = r.delete(a) - case "POST": - resp = r.post(a) - case "COPY": - resp = r.copy(a) - default: - fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) - } - - content_type := req.Header.Get("Content-Type") - if resp != nil && req.Method != "HEAD" { - if strings.HasPrefix(content_type, "application/json") || - req.URL.Query().Get("format") == "json" { - jsonMarshal(w, resp) - } else { - switch r := resp.(type) { - case string: - w.Write([]byte(r)) - default: - w.Write(resp.([]byte)) - } - } - } -} - -func jsonMarshal(w io.Writer, x interface{}) { - if err := json.NewEncoder(w).Encode(x); err != nil { - panic(fmt.Errorf("error marshalling %#v: %v", x, err)) - } -} - -var pathRegexp = regexp.MustCompile("/v1/AUTH_[a-zA-Z0-9]+(/([^/]+)(/(.*))?)?") - -// resourceForURL returns a resource object for the given URL. -func (srv *SwiftServer) resourceForURL(u *url.URL) (r resource) { - m := pathRegexp.FindStringSubmatch(u.Path) - if m == nil { - fatalf(404, "InvalidURI", "Couldn't parse the specified URI") - } - containerName := m[2] - objectName := m[4] - if containerName == "" { - return rootResource{} - } - b := containerResource{ - name: containerName, - container: srv.Containers[containerName], - } - - if objectName == "" { - return b - } - - if b.container == nil { - fatalf(404, "NoSuchContainer", "The specified container does not exist") - } - - objr := objectResource{ - name: objectName, - version: u.Query().Get("versionId"), - container: b.container, - } - - if obj := objr.container.objects[objr.name]; obj != nil { - objr.object = obj - } - return objr -} - -// nullResource has error stubs for all resource methods. 
-type nullResource struct{} - -func notAllowed() interface{} { - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -func notAuthorized() interface{} { - fatalf(401, "Unauthorized", "This server could not verify that you are authorized to access the document you requested.") - return nil -} - -func (nullResource) put(a *action) interface{} { return notAllowed() } -func (nullResource) get(a *action) interface{} { return notAllowed() } -func (nullResource) post(a *action) interface{} { return notAllowed() } -func (nullResource) delete(a *action) interface{} { return notAllowed() } -func (nullResource) copy(a *action) interface{} { return notAllowed() } - -type rootResource struct{} - -func (rootResource) put(a *action) interface{} { return notAllowed() } -func (rootResource) get(a *action) interface{} { - marker := a.req.Form.Get("marker") - prefix := a.req.Form.Get("prefix") - format := a.req.URL.Query().Get("format") - - h := a.w.Header() - - h.Set("X-Account-Bytes-Used", strconv.Itoa(int(a.user.BytesUsed))) - h.Set("X-Account-Container-Count", strconv.Itoa(int(a.user.Containers))) - h.Set("X-Account-Object-Count", strconv.Itoa(int(a.user.Objects))) - - // add metadata - a.user.metadata.getMetadata(a) - - if a.req.Method == "HEAD" { - return nil - } - - var tmp orderedContainers - // first get all matching objects and arrange them in alphabetical order. - for _, container := range a.srv.Containers { - if strings.HasPrefix(container.name, prefix) { - tmp = append(tmp, container) - } - } - sort.Sort(tmp) - - resp := make([]Folder, 0) - for _, container := range tmp { - if container.name <= marker { - continue - } - if format == "json" { - resp = append(resp, Folder{ - Count: len(container.objects), - Bytes: container.bytes, - Name: container.name, - }) - } else { - a.w.Write([]byte(container.name + "\n")) - } - } - - if format == "json" { - return resp - } else { - return nil - } -} - -func (r rootResource) post(a *action) interface{} { - a.user.metadata.setMetadata(a, "account") - return nil -} - -func (rootResource) delete(a *action) interface{} { - if a.req.URL.Query().Get("bulk-delete") == "1" { - fatalf(403, "Operation forbidden", "Bulk delete is not supported") - } - - return notAllowed() -} - -func (rootResource) copy(a *action) interface{} { return notAllowed() } - -func NewSwiftServer(address string) (*SwiftServer, error) { - var ( - l net.Listener - err error - ) - if strings.Index(address, ":") == -1 { - for port := 1024; port < 65535; port++ { - addr := fmt.Sprintf("%s:%d", address, port) - if l, err = net.Listen("tcp", addr); err == nil { - address = addr - break - } - } - } else { - l, err = net.Listen("tcp", address) - } - if err != nil { - return nil, fmt.Errorf("cannot listen on %s: %v", address, err) - } - - server := &SwiftServer{ - Listener: l, - AuthURL: "http://" + l.Addr().String() + "/v1.0", - URL: "http://" + l.Addr().String() + "/v1", - Containers: make(map[string]*container), - Accounts: make(map[string]*account), - Sessions: make(map[string]*session), - } - - server.Accounts["swifttest"] = &account{ - password: "swifttest", - metadata: metadata{ - meta: make(http.Header), - }, - } - - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - server.serveHTTP(w, req) - })) - - return server, nil -} - -func (srv *SwiftServer) Close() { - srv.Listener.Close() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader.go 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader.go deleted file mode 100644 index 3839e9ea..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader.go +++ /dev/null @@ -1,57 +0,0 @@ -package swift - -import ( - "io" - "time" -) - -// An io.ReadCloser which obeys an idle timeout -type timeoutReader struct { - reader io.ReadCloser - timeout time.Duration - cancel func() -} - -// Returns a wrapper around the reader which obeys an idle -// timeout. The cancel function is called if the timeout happens -func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader { - return &timeoutReader{ - reader: reader, - timeout: timeout, - cancel: cancel, - } -} - -// Read reads up to len(p) bytes into p -// -// Waits at most for timeout for the read to complete otherwise returns a timeout -func (t *timeoutReader) Read(p []byte) (int, error) { - // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? - // Do the read in the background - type result struct { - n int - err error - } - done := make(chan result, 1) - go func() { - n, err := t.reader.Read(p) - done <- result{n, err} - }() - // Wait for the read or the timeout - select { - case r := <-done: - return r.n, r.err - case <-time.After(t.timeout): - t.cancel() - return 0, TimeoutError - } - panic("unreachable") // for Go 1.0 -} - -// Close the channel -func (t *timeoutReader) Close() error { - return t.reader.Close() -} - -// Check it satisfies the interface -var _ io.ReadCloser = &timeoutReader{} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go deleted file mode 100644 index 2348617b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// This tests TimeoutReader - -package swift - -import ( - "io" - "io/ioutil" - "sync" - "testing" - "time" -) - -// An io.ReadCloser for testing -type testReader struct { - sync.Mutex - n int - delay time.Duration - closed bool -} - -// Returns n bytes with at time.Duration delay -func newTestReader(n int, delay time.Duration) *testReader { - return &testReader{ - n: n, - delay: delay, - } -} - -// Returns 1 byte at a time after delay -func (t *testReader) Read(p []byte) (n int, err error) { - if t.n <= 0 { - return 0, io.EOF - } - time.Sleep(t.delay) - p[0] = 'A' - t.Lock() - t.n-- - t.Unlock() - return 1, nil -} - -// Close the channel -func (t *testReader) Close() error { - t.Lock() - t.closed = true - t.Unlock() - return nil -} - -func TestTimeoutReaderNoTimeout(t *testing.T) { - test := newTestReader(3, 10*time.Millisecond) - cancelled := false - cancel := func() { - cancelled = true - } - tr := newTimeoutReader(test, 100*time.Millisecond, cancel) - b, err := ioutil.ReadAll(tr) - if err != nil || string(b) != "AAA" { - t.Fatalf("Bad read %s %s", err, b) - } - if cancelled { - t.Fatal("Cancelled when shouldn't have been") - } - if test.n != 0 { - t.Fatal("Didn't read all") - } - if test.closed { - t.Fatal("Shouldn't be closed") - } - tr.Close() - if !test.closed { - t.Fatal("Should be closed") - } -} - -func TestTimeoutReaderTimeout(t *testing.T) { - // Return those bytes slowly so we get an idle timeout - test := newTestReader(3, 100*time.Millisecond) - cancelled := false - 
cancel := func() { - cancelled = true - } - tr := newTimeoutReader(test, 10*time.Millisecond, cancel) - _, err := ioutil.ReadAll(tr) - if err != TimeoutError { - t.Fatal("Expecting TimeoutError, got", err) - } - if !cancelled { - t.Fatal("Not cancelled when should have been") - } - test.Lock() - n := test.n - test.Unlock() - if n == 0 { - t.Fatal("Read all") - } - if n != 3 { - t.Fatal("Didn't read any") - } - if test.closed { - t.Fatal("Shouldn't be closed") - } - tr.Close() - if !test.closed { - t.Fatal("Should be closed") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader.go deleted file mode 100644 index b12b1bbe..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader.go +++ /dev/null @@ -1,34 +0,0 @@ -package swift - -import ( - "io" - "time" -) - -// An io.Reader which resets a watchdog timer whenever data is read -type watchdogReader struct { - timeout time.Duration - reader io.Reader - timer *time.Timer -} - -// Returns a new reader which will kick the watchdog timer whenever data is read -func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader { - return &watchdogReader{ - timeout: timeout, - reader: reader, - timer: timer, - } -} - -// Read reads up to len(p) bytes into p -func (t *watchdogReader) Read(p []byte) (n int, err error) { - // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? - resetTimer(t.timer, t.timeout) - n, err = t.reader.Read(p) - resetTimer(t.timer, t.timeout) - return -} - -// Check it satisfies the interface -var _ io.Reader = &watchdogReader{} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go deleted file mode 100644 index 8b879d44..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// This tests WatchdogReader - -package swift - -import ( - "io/ioutil" - "testing" - "time" -) - -// Uses testReader from timeout_reader_test.go - -func testWatchdogReaderTimeout(t *testing.T, initialTimeout, watchdogTimeout time.Duration, expectedTimeout bool) { - test := newTestReader(3, 10*time.Millisecond) - timer := time.NewTimer(initialTimeout) - firedChan := make(chan bool) - started := make(chan bool) - go func() { - started <- true - select { - case <-timer.C: - firedChan <- true - } - }() - <-started - wr := newWatchdogReader(test, watchdogTimeout, timer) - b, err := ioutil.ReadAll(wr) - if err != nil || string(b) != "AAA" { - t.Fatalf("Bad read %s %s", err, b) - } - fired := false - select { - case fired = <-firedChan: - default: - } - if expectedTimeout { - if !fired { - t.Fatal("Timer should have fired") - } - } else { - if fired { - t.Fatal("Timer should not have fired") - } - } -} - -func TestWatchdogReaderNoTimeout(t *testing.T) { - testWatchdogReaderTimeout(t, 100*time.Millisecond, 100*time.Millisecond, false) -} - -func TestWatchdogReaderTimeout(t *testing.T) { - testWatchdogReaderTimeout(t, 5*time.Millisecond, 5*time.Millisecond, true) -} - -func TestWatchdogReaderNoTimeoutShortInitial(t *testing.T) { - testWatchdogReaderTimeout(t, 5*time.Millisecond, 100*time.Millisecond, false) -} - -func 
TestWatchdogReaderTimeoutLongInitial(t *testing.T) { - testWatchdogReaderTimeout(t, 100*time.Millisecond, 5*time.Millisecond, true) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/conn.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/conn.go deleted file mode 100644 index af3cfebe..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/conn.go +++ /dev/null @@ -1,300 +0,0 @@ -package rados - -// #cgo LDFLAGS: -lrados -// #include <stdlib.h> -// #include <rados/librados.h> -import "C" - -import "unsafe" -import "bytes" - -// ClusterStat represents Ceph cluster statistics. -type ClusterStat struct { - Kb uint64 - Kb_used uint64 - Kb_avail uint64 - Num_objects uint64 -} - -// Conn is a connection handle to a Ceph cluster. -type Conn struct { - cluster C.rados_t -} - -// PingMonitor sends a ping to a monitor and returns the reply. -func (c *Conn) PingMonitor(id string) (string, error) { - c_id := C.CString(id) - defer C.free(unsafe.Pointer(c_id)) - - var strlen C.size_t - var strout *C.char - - ret := C.rados_ping_monitor(c.cluster, c_id, &strout, &strlen) - defer C.rados_buffer_free(strout) - - if ret == 0 { - reply := C.GoStringN(strout, (C.int)(strlen)) - return reply, nil - } else { - return "", RadosError(int(ret)) - } -} - -// Connect establishes a connection to a RADOS cluster. It returns an error, -// if any. -func (c *Conn) Connect() error { - ret := C.rados_connect(c.cluster) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// Shutdown disconnects from the cluster. -func (c *Conn) Shutdown() { - C.rados_shutdown(c.cluster) -} - -// ReadConfigFile configures the connection using a Ceph configuration file. -func (c *Conn) ReadConfigFile(path string) error { - c_path := C.CString(path) - defer C.free(unsafe.Pointer(c_path)) - ret := C.rados_conf_read_file(c.cluster, c_path) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// ReadDefaultConfigFile configures the connection using a Ceph configuration -// file located at default locations. -func (c *Conn) ReadDefaultConfigFile() error { - ret := C.rados_conf_read_file(c.cluster, nil) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -func (c *Conn) OpenIOContext(pool string) (*IOContext, error) { - c_pool := C.CString(pool) - defer C.free(unsafe.Pointer(c_pool)) - ioctx := &IOContext{} - ret := C.rados_ioctx_create(c.cluster, c_pool, &ioctx.ioctx) - if ret == 0 { - return ioctx, nil - } else { - return nil, RadosError(int(ret)) - } -} - -// ListPools returns the names of all existing pools. -func (c *Conn) ListPools() (names []string, err error) { - buf := make([]byte, 4096) - for { - ret := int(C.rados_pool_list(c.cluster, - (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))) - if ret < 0 { - return nil, RadosError(int(ret)) - } - - if ret > len(buf) { - buf = make([]byte, ret) - continue - } - - tmp := bytes.SplitAfter(buf[:ret-1], []byte{0}) - for _, s := range tmp { - if len(s) > 0 { - name := C.GoString((*C.char)(unsafe.Pointer(&s[0]))) - names = append(names, name) - } - } - - return names, nil - } -} - -// SetConfigOption sets the value of the configuration option identified by -// the given name. 
-func (c *Conn) SetConfigOption(option, value string) error { - c_opt, c_val := C.CString(option), C.CString(value) - defer C.free(unsafe.Pointer(c_opt)) - defer C.free(unsafe.Pointer(c_val)) - ret := C.rados_conf_set(c.cluster, c_opt, c_val) - if ret < 0 { - return RadosError(int(ret)) - } else { - return nil - } -} - -// GetConfigOption returns the value of the Ceph configuration option -// identified by the given name. -func (c *Conn) GetConfigOption(name string) (value string, err error) { - buf := make([]byte, 4096) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_name)) - ret := int(C.rados_conf_get(c.cluster, c_name, - (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))) - // FIXME: ret may be -ENAMETOOLONG if the buffer is not large enough. We - // can handle this case, but we need a reliable way to test for - // -ENAMETOOLONG constant. Will the syscall/Errno stuff in Go help? - if ret == 0 { - value = C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) - return value, nil - } else { - return "", RadosError(ret) - } -} - -// WaitForLatestOSDMap blocks the caller until the latest OSD map has been -// retrieved. -func (c *Conn) WaitForLatestOSDMap() error { - ret := C.rados_wait_for_latest_osdmap(c.cluster) - if ret < 0 { - return RadosError(int(ret)) - } else { - return nil - } -} - -// GetClusterStat returns statistics about the cluster associated with the -// connection. -func (c *Conn) GetClusterStats() (stat ClusterStat, err error) { - c_stat := C.struct_rados_cluster_stat_t{} - ret := C.rados_cluster_stat(c.cluster, &c_stat) - if ret < 0 { - return ClusterStat{}, RadosError(int(ret)) - } else { - return ClusterStat{ - Kb: uint64(c_stat.kb), - Kb_used: uint64(c_stat.kb_used), - Kb_avail: uint64(c_stat.kb_avail), - Num_objects: uint64(c_stat.num_objects), - }, nil - } -} - -// ParseCmdLineArgs configures the connection from command line arguments. -func (c *Conn) ParseCmdLineArgs(args []string) error { - // add an empty element 0 -- Ceph treats the array as the actual contents - // of argv and skips the first element (the executable name) - argc := C.int(len(args) + 1) - argv := make([]*C.char, argc) - - // make the first element a string just in case it is ever examined - argv[0] = C.CString("placeholder") - defer C.free(unsafe.Pointer(argv[0])) - - for i, arg := range args { - argv[i+1] = C.CString(arg) - defer C.free(unsafe.Pointer(argv[i+1])) - } - - ret := C.rados_conf_parse_argv(c.cluster, argc, &argv[0]) - if ret < 0 { - return RadosError(int(ret)) - } else { - return nil - } -} - -// ParseDefaultConfigEnv configures the connection from the default Ceph -// environment variable(s). -func (c *Conn) ParseDefaultConfigEnv() error { - ret := C.rados_conf_parse_env(c.cluster, nil) - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// GetFSID returns the fsid of the cluster as a hexadecimal string. The fsid -// is a unique identifier of an entire Ceph cluster. -func (c *Conn) GetFSID() (fsid string, err error) { - buf := make([]byte, 37) - ret := int(C.rados_cluster_fsid(c.cluster, - (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))) - // FIXME: the success case isn't documented correctly in librados.h - if ret == 36 { - fsid = C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) - return fsid, nil - } else { - return "", RadosError(int(ret)) - } -} - -// GetInstanceID returns a globally unique identifier for the cluster -// connection instance. -func (c *Conn) GetInstanceID() uint64 { - // FIXME: are there any error cases for this? 
- return uint64(C.rados_get_instance_id(c.cluster)) -} - -// MakePool creates a new pool with default settings. -func (c *Conn) MakePool(name string) error { - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_name)) - ret := int(C.rados_pool_create(c.cluster, c_name)) - if ret == 0 { - return nil - } else { - return RadosError(ret) - } -} - -// DeletePool deletes a pool and all the data inside the pool. -func (c *Conn) DeletePool(name string) error { - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_name)) - ret := int(C.rados_pool_delete(c.cluster, c_name)) - if ret == 0 { - return nil - } else { - return RadosError(ret) - } -} - -// MonCommand sends a command to one of the monitors -func (c *Conn) MonCommand(args []byte) (buffer []byte, info string, err error) { - argv := make([]*C.char, len(args)) - for i, _ := range args { - argv[i] = (*C.char)(unsafe.Pointer(&args[i])) - } - - var ( - outs, outbuf *C.char - outslen, outbuflen C.size_t - ) - inbuf := C.CString("") - defer C.free(unsafe.Pointer(inbuf)) - - ret := C.rados_mon_command(c.cluster, - &argv[0], C.size_t(len(args)), - inbuf, // bulk input (e.g. crush map) - C.size_t(0), // length inbuf - &outbuf, // buffer - &outbuflen, // buffer length - &outs, // status string - &outslen) - - if outslen > 0 { - info = C.GoStringN(outs, C.int(outslen)) - C.free(unsafe.Pointer(outs)) - } - if outbuflen > 0 { - buffer = C.GoBytes(unsafe.Pointer(outbuf), C.int(outbuflen)) - C.free(unsafe.Pointer(outbuf)) - } - if ret != 0 { - err = RadosError(int(ret)) - return nil, info, err - } - - return -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/doc.go deleted file mode 100644 index 14babe93..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Set of wrappers around librados API. -*/ -package rados diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/ioctx.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/ioctx.go deleted file mode 100644 index ef67b4fb..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/ioctx.go +++ /dev/null @@ -1,547 +0,0 @@ -package rados - -// #cgo LDFLAGS: -lrados -// #include <stdlib.h> -// #include <rados/librados.h> -import "C" - -import "unsafe" -import "time" - -// PoolStat represents Ceph pool statistics. -type PoolStat struct { - // space used in bytes - Num_bytes uint64 - // space used in KB - Num_kb uint64 - // number of objects in the pool - Num_objects uint64 - // number of clones of objects - Num_object_clones uint64 - // num_objects * num_replicas - Num_object_copies uint64 - Num_objects_missing_on_primary uint64 - // number of objects found on no OSDs - Num_objects_unfound uint64 - // number of objects replicated fewer times than they should be - // (but found on at least one OSD) - Num_objects_degraded uint64 - Num_rd uint64 - Num_rd_kb uint64 - Num_wr uint64 - Num_wr_kb uint64 -} - -// ObjectStat represents object stat information -type ObjectStat struct { - // current length in bytes - Size uint64 - // last modification time - ModTime time.Time -} - -// IOContext represents a context for performing I/O within a pool. 
-type IOContext struct { - ioctx C.rados_ioctx_t -} - -// Pointer returns a uintptr representation of the IOContext. -func (ioctx *IOContext) Pointer() uintptr { - return uintptr(ioctx.ioctx) -} - -// Write writes len(data) bytes to the object with key oid starting at byte -// offset offset. It returns an error, if any. -func (ioctx *IOContext) Write(oid string, data []byte, offset uint64) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_write(ioctx.ioctx, c_oid, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data)), - (C.uint64_t)(offset)) - - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// Read reads up to len(data) bytes from the object with key oid starting at byte -// offset offset. It returns the number of bytes read and an error, if any. -func (ioctx *IOContext) Read(oid string, data []byte, offset uint64) (int, error) { - if len(data) == 0 { - return 0, nil - } - - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_read( - ioctx.ioctx, - c_oid, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data)), - (C.uint64_t)(offset)) - - if ret >= 0 { - return int(ret), nil - } else { - return 0, RadosError(int(ret)) - } -} - -// Delete deletes the object with key oid. It returns an error, if any. -func (ioctx *IOContext) Delete(oid string) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_remove(ioctx.ioctx, c_oid) - - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// Truncate resizes the object with key oid to size size. If the operation -// enlarges the object, the new area is logically filled with zeroes. If the -// operation shrinks the object, the excess data is removed. It returns an -// error, if any. -func (ioctx *IOContext) Truncate(oid string, size uint64) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - ret := C.rados_trunc(ioctx.ioctx, c_oid, (C.uint64_t)(size)) - - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// Destroy informs librados that the I/O context is no longer in use. -// Resources associated with the context may not be freed immediately, and the -// context should not be used again after calling this method. -func (ioctx *IOContext) Destroy() { - C.rados_ioctx_destroy(ioctx.ioctx) -} - -// Stat returns a set of statistics about the pool associated with this I/O -// context. -func (ioctx *IOContext) GetPoolStats() (stat PoolStat, err error) { - c_stat := C.struct_rados_pool_stat_t{} - ret := C.rados_ioctx_pool_stat(ioctx.ioctx, &c_stat) - if ret < 0 { - return PoolStat{}, RadosError(int(ret)) - } else { - return PoolStat{ - Num_bytes: uint64(c_stat.num_bytes), - Num_kb: uint64(c_stat.num_kb), - Num_objects: uint64(c_stat.num_objects), - Num_object_clones: uint64(c_stat.num_object_clones), - Num_object_copies: uint64(c_stat.num_object_copies), - Num_objects_missing_on_primary: uint64(c_stat.num_objects_missing_on_primary), - Num_objects_unfound: uint64(c_stat.num_objects_unfound), - Num_objects_degraded: uint64(c_stat.num_objects_degraded), - Num_rd: uint64(c_stat.num_rd), - Num_rd_kb: uint64(c_stat.num_rd_kb), - Num_wr: uint64(c_stat.num_wr), - Num_wr_kb: uint64(c_stat.num_wr_kb), - }, nil - } -} - -// GetPoolName returns the name of the pool associated with the I/O context. 
-func (ioctx *IOContext) GetPoolName() (name string, err error) { - buf := make([]byte, 128) - for { - ret := C.rados_ioctx_get_pool_name(ioctx.ioctx, - (*C.char)(unsafe.Pointer(&buf[0])), C.unsigned(len(buf))) - if ret == -34 { // FIXME - buf = make([]byte, len(buf)*2) - continue - } else if ret < 0 { - return "", RadosError(ret) - } - name = C.GoStringN((*C.char)(unsafe.Pointer(&buf[0])), ret) - return name, nil - } -} - -// ObjectListFunc is the type of the function called for each object visited -// by ListObjects. -type ObjectListFunc func(oid string) - -// ListObjects lists all of the objects in the pool associated with the I/O -// context, and called the provided listFn function for each object, passing -// to the function the name of the object. -func (ioctx *IOContext) ListObjects(listFn ObjectListFunc) error { - var ctx C.rados_list_ctx_t - ret := C.rados_objects_list_open(ioctx.ioctx, &ctx) - if ret < 0 { - return RadosError(ret) - } - defer func() { C.rados_objects_list_close(ctx) }() - - for { - var c_entry *C.char - ret := C.rados_objects_list_next(ctx, &c_entry, nil) - if ret == -2 { // FIXME - return nil - } else if ret < 0 { - return RadosError(ret) - } - listFn(C.GoString(c_entry)) - } - - panic("invalid state") -} - -// Stat returns the size of the object and its last modification time -func (ioctx *IOContext) Stat(object string) (stat ObjectStat, err error) { - var c_psize C.uint64_t - var c_pmtime C.time_t - c_object := C.CString(object) - defer C.free(unsafe.Pointer(c_object)) - - ret := C.rados_stat( - ioctx.ioctx, - c_object, - &c_psize, - &c_pmtime) - - if ret < 0 { - return ObjectStat{}, RadosError(int(ret)) - } else { - return ObjectStat{ - Size: uint64(c_psize), - ModTime: time.Unix(int64(c_pmtime), 0), - }, nil - } -} - -// GetXattr gets an xattr with key `name`, it returns the length of -// the key read or an error if not successful -func (ioctx *IOContext) GetXattr(object string, name string, data []byte) (int, error) { - c_object := C.CString(object) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_object)) - defer C.free(unsafe.Pointer(c_name)) - - ret := C.rados_getxattr( - ioctx.ioctx, - c_object, - c_name, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data))) - - if ret >= 0 { - return int(ret), nil - } else { - return 0, RadosError(int(ret)) - } -} - -// Sets an xattr for an object with key `name` with value as `data` -func (ioctx *IOContext) SetXattr(object string, name string, data []byte) error { - c_object := C.CString(object) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_object)) - defer C.free(unsafe.Pointer(c_name)) - - ret := C.rados_setxattr( - ioctx.ioctx, - c_object, - c_name, - (*C.char)(unsafe.Pointer(&data[0])), - (C.size_t)(len(data))) - - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// function that lists all the xattrs for an object, since xattrs are -// a k-v pair, this function returns a map of k-v pairs on -// success, error code on failure -func (ioctx *IOContext) ListXattrs(oid string) (map[string][]byte, error) { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - var it C.rados_xattrs_iter_t - - ret := C.rados_getxattrs(ioctx.ioctx, c_oid, &it) - if ret < 0 { - return nil, RadosError(ret) - } - defer func() { C.rados_getxattrs_end(it) }() - m := make(map[string][]byte) - for { - var c_name, c_val *C.char - var c_len C.size_t - defer C.free(unsafe.Pointer(c_name)) - defer C.free(unsafe.Pointer(c_val)) - - ret := C.rados_getxattrs_next(it, &c_name, 
&c_val, &c_len) - if ret < 0 { - return nil, RadosError(int(ret)) - } - // rados api returns a null name,val & 0-length upon - // end of iteration - if c_name == nil { - return m, nil // stop iteration - } - m[C.GoString(c_name)] = C.GoBytes(unsafe.Pointer(c_val), (C.int)(c_len)) - } -} - -// Remove an xattr with key `name` from object `oid` -func (ioctx *IOContext) RmXattr(oid string, name string) error { - c_oid := C.CString(oid) - c_name := C.CString(name) - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_name)) - - ret := C.rados_rmxattr( - ioctx.ioctx, - c_oid, - c_name) - - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// Append the map `pairs` to the omap `oid` -func (ioctx *IOContext) SetOmap(oid string, pairs map[string][]byte) error { - c_oid := C.CString(oid) - defer C.free(unsafe.Pointer(c_oid)) - - var s C.size_t - var c *C.char - ptrSize := unsafe.Sizeof(c) - - c_keys := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize)) - c_values := C.malloc(C.size_t(len(pairs)) * C.size_t(ptrSize)) - c_lengths := C.malloc(C.size_t(len(pairs)) * C.size_t(unsafe.Sizeof(s))) - - defer C.free(unsafe.Pointer(c_keys)) - defer C.free(unsafe.Pointer(c_values)) - defer C.free(unsafe.Pointer(c_lengths)) - - i := 0 - for key, value := range pairs { - // key - c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i) * ptrSize)) - *c_key_ptr = C.CString(key) - defer C.free(unsafe.Pointer(*c_key_ptr)) - - // value and its length - c_value_ptr := (**C.char)(unsafe.Pointer(uintptr(c_values) + uintptr(i) * ptrSize)) - - var c_length C.size_t - if len(value) > 0 { - *c_value_ptr = (*C.char)(unsafe.Pointer(&value[0])) - c_length = C.size_t(len(value)) - } else { - *c_value_ptr = nil - c_length = C.size_t(0) - } - - c_length_ptr := (*C.size_t)(unsafe.Pointer(uintptr(c_lengths) + uintptr(i) * ptrSize)) - *c_length_ptr = c_length - - i++ - } - - op := C.rados_create_write_op() - C.rados_write_op_omap_set( - op, - (**C.char)(c_keys), - (**C.char)(c_values), - (*C.size_t)(c_lengths), - C.size_t(len(pairs))) - - ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0) - C.rados_release_write_op(op) - - if ret == 0 { - return nil - } else { - return RadosError(int(ret)) - } -} - -// OmapListFunc is the type of the function called for each omap key -// visited by ListOmapValues -type OmapListFunc func(key string, value []byte) - -// Iterate on a set of keys and their values from an omap -// `startAfter`: iterate only on the keys after this specified one -// `filterPrefix`: iterate only on the keys beginning with this prefix -// `maxReturn`: iterate no more than `maxReturn` key/value pairs -// `listFn`: the function called at each iteration -func (ioctx *IOContext) ListOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64, listFn OmapListFunc) error { - c_oid := C.CString(oid) - c_start_after := C.CString(startAfter) - c_filter_prefix := C.CString(filterPrefix) - c_max_return := C.uint64_t(maxReturn) - - defer C.free(unsafe.Pointer(c_oid)) - defer C.free(unsafe.Pointer(c_start_after)) - defer C.free(unsafe.Pointer(c_filter_prefix)) - - op := C.rados_create_read_op() - - var c_iter C.rados_omap_iter_t - var c_prval C.int - C.rados_read_op_omap_get_vals( - op, - c_start_after, - c_filter_prefix, - c_max_return, - &c_iter, - &c_prval, - ) - - ret := C.rados_read_op_operate(op, ioctx.ioctx, c_oid, 0) - - if int(c_prval) != 0 { - return RadosError(int(c_prval)) - } else if int(ret) != 0 { - return RadosError(int(ret)) - } - - 
for {
-		var c_key *C.char
-		var c_val *C.char
-		var c_len C.size_t
-
-		ret = C.rados_omap_get_next(c_iter, &c_key, &c_val, &c_len)
-
-		if int(ret) != 0 {
-			return RadosError(int(ret))
-		}
-
-		if c_key == nil {
-			break
-		}
-
-		listFn(C.GoString(c_key), C.GoBytes(unsafe.Pointer(c_val), C.int(c_len)))
-	}
-
-	C.rados_omap_get_end(c_iter)
-	C.rados_release_read_op(op)
-
-	return nil
-}
-
-// Fetch a set of keys and their values from an omap and return them as a map
-// `startAfter`: retrieve only the keys after this specified one
-// `filterPrefix`: retrieve only the keys beginning with this prefix
-// `maxReturn`: retrieve no more than `maxReturn` key/value pairs
-func (ioctx *IOContext) GetOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64) (map[string][]byte, error) {
-	omap := map[string][]byte{}
-
-	err := ioctx.ListOmapValues(
-		oid, startAfter, filterPrefix, maxReturn,
-		func(key string, value []byte) {
-			omap[key] = value
-		},
-	)
-
-	return omap, err
-}
-
-// Fetch all the keys and their values from an omap and return them as a map
-// `startAfter`: retrieve only the keys after this specified one
-// `filterPrefix`: retrieve only the keys beginning with this prefix
-// `iteratorSize`: internal number of keys to fetch during a read operation
-func (ioctx *IOContext) GetAllOmapValues(oid string, startAfter string, filterPrefix string, iteratorSize int64) (map[string][]byte, error) {
-	omap := map[string][]byte{}
-	omapSize := 0
-
-	for {
-		err := ioctx.ListOmapValues(
-			oid, startAfter, filterPrefix, iteratorSize,
-			func(key string, value []byte) {
-				omap[key] = value
-				startAfter = key
-			},
-		)
-
-		if err != nil {
-			return omap, err
-		}
-
-		// End of omap: no new keys were fetched in this pass
-		if len(omap) == omapSize {
-			break
-		}
-
-		omapSize = len(omap)
-	}
-
-	return omap, nil
-}
-
-// Remove the specified `keys` from the omap `oid`
-func (ioctx *IOContext) RmOmapKeys(oid string, keys []string) error {
-	c_oid := C.CString(oid)
-	defer C.free(unsafe.Pointer(c_oid))
-
-	var c *C.char
-	ptrSize := unsafe.Sizeof(c)
-
-	c_keys := C.malloc(C.size_t(len(keys)) * C.size_t(ptrSize))
-	defer C.free(unsafe.Pointer(c_keys))
-
-	i := 0
-	for _, key := range keys {
-		c_key_ptr := (**C.char)(unsafe.Pointer(uintptr(c_keys) + uintptr(i)*ptrSize))
-		*c_key_ptr = C.CString(key)
-		defer C.free(unsafe.Pointer(*c_key_ptr))
-		i++
-	}
-
-	op := C.rados_create_write_op()
-	C.rados_write_op_omap_rm_keys(
-		op,
-		(**C.char)(c_keys),
-		C.size_t(len(keys)))
-
-	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
-	C.rados_release_write_op(op)
-
-	if ret == 0 {
-		return nil
-	} else {
-		return RadosError(int(ret))
-	}
-}
-
-// Clear the omap `oid`
-func (ioctx *IOContext) CleanOmap(oid string) error {
-	c_oid := C.CString(oid)
-	defer C.free(unsafe.Pointer(c_oid))
-
-	op := C.rados_create_write_op()
-	C.rados_write_op_omap_clear(op)
-
-	ret := C.rados_write_op_operate(op, ioctx.ioctx, c_oid, nil, 0)
-	C.rados_release_write_op(op)
-
-	if ret == 0 {
-		return nil
-	} else {
-		return RadosError(int(ret))
-	}
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados.go
deleted file mode 100644
index 935bc248..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package rados
-
-// #cgo LDFLAGS: -lrados
-// #include <stdlib.h>
-// #include <rados/librados.h>
import "C"
-
-import (
- "fmt" - "unsafe" -) - -type RadosError int - -func (e RadosError) Error() string { - return fmt.Sprintf("rados: ret=%d", e) -} - -// Version returns the major, minor, and patch components of the version of -// the RADOS library linked against. -func Version() (int, int, int) { - var c_major, c_minor, c_patch C.int - C.rados_version(&c_major, &c_minor, &c_patch) - return int(c_major), int(c_minor), int(c_patch) -} - -// NewConn creates a new connection object. It returns the connection and an -// error, if any. -func NewConn() (*Conn, error) { - conn := &Conn{} - ret := C.rados_create(&conn.cluster, nil) - - if ret == 0 { - return conn, nil - } else { - return nil, RadosError(int(ret)) - } -} - -// NewConnWithUser creates a new connection object with a custom username. -// It returns the connection and an error, if any. -func NewConnWithUser(user string) (*Conn, error) { - c_user := C.CString(user) - defer C.free(unsafe.Pointer(c_user)) - - conn := &Conn{} - ret := C.rados_create(&conn.cluster, c_user) - - if ret == 0 { - return conn, nil - } else { - return nil, RadosError(int(ret)) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados_test.go deleted file mode 100644 index a31c1872..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/noahdesu/go-ceph/rados/rados_test.go +++ /dev/null @@ -1,703 +0,0 @@ -package rados_test - -import "testing" - -//import "bytes" -import "github.com/noahdesu/go-ceph/rados" -import "github.com/stretchr/testify/assert" -import "os" -import "os/exec" -import "io" -import "io/ioutil" -import "time" -import "net" -import "fmt" -import "sort" -import "encoding/json" - -func GetUUID() string { - out, _ := exec.Command("uuidgen").Output() - return string(out[:36]) -} - -func TestVersion(t *testing.T) { - var major, minor, patch = rados.Version() - assert.False(t, major < 0 || major > 1000, "invalid major") - assert.False(t, minor < 0 || minor > 1000, "invalid minor") - assert.False(t, patch < 0 || patch > 1000, "invalid patch") -} - -func TestGetSetConfigOption(t *testing.T) { - conn, _ := rados.NewConn() - - // rejects invalid options - err := conn.SetConfigOption("wefoijweojfiw", "welfkwjelkfj") - assert.Error(t, err, "Invalid option") - - // verify SetConfigOption changes a values - log_file_val, err := conn.GetConfigOption("log_file") - assert.NotEqual(t, log_file_val, "/dev/null") - - err = conn.SetConfigOption("log_file", "/dev/null") - assert.NoError(t, err, "Invalid option") - - log_file_val, err = conn.GetConfigOption("log_file") - assert.Equal(t, log_file_val, "/dev/null") -} - -func TestParseDefaultConfigEnv(t *testing.T) { - conn, _ := rados.NewConn() - - log_file_val, _ := conn.GetConfigOption("log_file") - assert.NotEqual(t, log_file_val, "/dev/null") - - err := os.Setenv("CEPH_ARGS", "--log-file /dev/null") - assert.NoError(t, err) - - err = conn.ParseDefaultConfigEnv() - assert.NoError(t, err) - - log_file_val, _ = conn.GetConfigOption("log_file") - assert.Equal(t, log_file_val, "/dev/null") -} - -func TestParseCmdLineArgs(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - - mon_host_val, _ := conn.GetConfigOption("mon_host") - assert.NotEqual(t, mon_host_val, "1.1.1.1") - - args := []string{"--mon-host", "1.1.1.1"} - err := conn.ParseCmdLineArgs(args) - assert.NoError(t, err) - - mon_host_val, _ = 
conn.GetConfigOption("mon_host") - assert.Equal(t, mon_host_val, "1.1.1.1") -} - -func TestGetClusterStats(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - poolname := GetUUID() - err := conn.MakePool(poolname) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(poolname) - assert.NoError(t, err) - - // grab current stats - prev_stat, err := conn.GetClusterStats() - fmt.Printf("prev_stat: %+v\n", prev_stat) - assert.NoError(t, err) - - // make some changes to the cluster - buf := make([]byte, 1<<20) - for i := 0; i < 10; i++ { - objname := GetUUID() - pool.Write(objname, buf, 0) - } - - // wait a while for the stats to change - for i := 0; i < 30; i++ { - stat, err := conn.GetClusterStats() - assert.NoError(t, err) - - // wait for something to change - if stat == prev_stat { - fmt.Printf("curr_stat: %+v (trying again...)\n", stat) - time.Sleep(time.Second) - } else { - // success - fmt.Printf("curr_stat: %+v (change detected)\n", stat) - conn.Shutdown() - return - } - } - - pool.Destroy() - conn.Shutdown() - t.Error("Cluster stats aren't changing") -} - -func TestGetFSID(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - fsid, err := conn.GetFSID() - assert.NoError(t, err) - assert.NotEqual(t, fsid, "") - - conn.Shutdown() -} - -func TestGetInstanceID(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - id := conn.GetInstanceID() - assert.NotEqual(t, id, 0) - - conn.Shutdown() -} - -func TestMakeDeletePool(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - // get current list of pool - pools, err := conn.ListPools() - assert.NoError(t, err) - - // check that new pool name is unique - new_name := GetUUID() - for _, poolname := range pools { - if new_name == poolname { - t.Error("Random pool name exists!") - return - } - } - - // create pool - err = conn.MakePool(new_name) - assert.NoError(t, err) - - // get updated list of pools - pools, err = conn.ListPools() - assert.NoError(t, err) - - // verify that the new pool name exists - found := false - for _, poolname := range pools { - if new_name == poolname { - found = true - } - } - - if !found { - t.Error("Cannot find newly created pool") - } - - // delete the pool - err = conn.DeletePool(new_name) - assert.NoError(t, err) - - // verify that it is gone - - // get updated list of pools - pools, err = conn.ListPools() - assert.NoError(t, err) - - // verify that the new pool name exists - found = false - for _, poolname := range pools { - if new_name == poolname { - found = true - } - } - - if found { - t.Error("Deleted pool still exists") - } - - conn.Shutdown() -} - -func TestPingMonitor(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - // mon id that should work with vstart.sh - reply, err := conn.PingMonitor("a") - if err == nil { - assert.NotEqual(t, reply, "") - return - } - - // mon id that should work with micro-osd.sh - reply, err = conn.PingMonitor("0") - if err == nil { - assert.NotEqual(t, reply, "") - return - } - - // try to use a hostname as the monitor id - mon_addr, _ := conn.GetConfigOption("mon_host") - hosts, _ := net.LookupAddr(mon_addr) - for _, host := range hosts { - reply, err := conn.PingMonitor(host) - if err == nil { - assert.NotEqual(t, reply, "") - return - } - } - - t.Error("Could not find a valid monitor id") - - conn.Shutdown() -} - -func TestReadConfigFile(t *testing.T) { - 
conn, _ := rados.NewConn() - - // check current log_file value - log_file_val, err := conn.GetConfigOption("log_file") - assert.NoError(t, err) - assert.NotEqual(t, log_file_val, "/dev/null") - - // create a temporary ceph.conf file that changes the log_file conf - // option. - file, err := ioutil.TempFile("/tmp", "go-rados") - assert.NoError(t, err) - - _, err = io.WriteString(file, "[global]\nlog_file = /dev/null\n") - assert.NoError(t, err) - - // parse the config file - err = conn.ReadConfigFile(file.Name()) - assert.NoError(t, err) - - // check current log_file value - log_file_val, err = conn.GetConfigOption("log_file") - assert.NoError(t, err) - assert.Equal(t, log_file_val, "/dev/null") - - // cleanup - file.Close() - os.Remove(file.Name()) -} - -func TestWaitForLatestOSDMap(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - err := conn.WaitForLatestOSDMap() - assert.NoError(t, err) - - conn.Shutdown() -} - -func TestReadWrite(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - // make pool - pool_name := GetUUID() - err := conn.MakePool(pool_name) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(pool_name) - assert.NoError(t, err) - - bytes_in := []byte("input data") - err = pool.Write("obj", bytes_in, 0) - assert.NoError(t, err) - - bytes_out := make([]byte, len(bytes_in)) - n_out, err := pool.Read("obj", bytes_out, 0) - - assert.Equal(t, n_out, len(bytes_in)) - assert.Equal(t, bytes_in, bytes_out) - - pool.Destroy() -} - -func TestObjectStat(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - pool_name := GetUUID() - err := conn.MakePool(pool_name) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(pool_name) - assert.NoError(t, err) - - bytes_in := []byte("input data") - err = pool.Write("obj", bytes_in, 0) - assert.NoError(t, err) - - stat, err := pool.Stat("obj") - assert.Equal(t, uint64(len(bytes_in)), stat.Size) - assert.NotNil(t, stat.ModTime) - - pool.Destroy() - conn.Shutdown() -} - -func TestGetPoolStats(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - poolname := GetUUID() - err := conn.MakePool(poolname) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(poolname) - assert.NoError(t, err) - - // grab current stats - prev_stat, err := pool.GetPoolStats() - fmt.Printf("prev_stat: %+v\n", prev_stat) - assert.NoError(t, err) - - // make some changes to the cluster - buf := make([]byte, 1<<20) - for i := 0; i < 10; i++ { - objname := GetUUID() - pool.Write(objname, buf, 0) - } - - // wait a while for the stats to change - for i := 0; i < 30; i++ { - stat, err := pool.GetPoolStats() - assert.NoError(t, err) - - // wait for something to change - if stat == prev_stat { - fmt.Printf("curr_stat: %+v (trying again...)\n", stat) - time.Sleep(time.Second) - } else { - // success - fmt.Printf("curr_stat: %+v (change detected)\n", stat) - conn.Shutdown() - return - } - } - - pool.Destroy() - conn.Shutdown() - t.Error("Pool stats aren't changing") -} - -func TestGetPoolName(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - poolname := GetUUID() - err := conn.MakePool(poolname) - assert.NoError(t, err) - - ioctx, err := conn.OpenIOContext(poolname) - assert.NoError(t, err) - - poolname_ret, err := ioctx.GetPoolName() - assert.NoError(t, err) - - assert.Equal(t, poolname, poolname_ret) - - ioctx.Destroy() - conn.Shutdown() -} - 
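// A sketch, not part of the vendored test file: the tests above all repeat
// the same connect/create-pool boilerplate and discard most setup errors. An
// error-checked helper built only from go-ceph calls already exercised in
// this file might look like the following. The helper name and its cleanup
// policy are illustrative assumptions, and it presumes (as in go-ceph) that
// ReadDefaultConfigFile and Connect return errors.

// newTestPool connects to the cluster, creates a uniquely named pool, and
// returns the open I/O context plus a cleanup function.
func newTestPool() (*rados.IOContext, func(), error) {
	conn, err := rados.NewConn()
	if err != nil {
		return nil, nil, err
	}
	if err := conn.ReadDefaultConfigFile(); err != nil {
		return nil, nil, err
	}
	if err := conn.Connect(); err != nil {
		return nil, nil, err
	}
	name := GetUUID()
	if err := conn.MakePool(name); err != nil {
		conn.Shutdown()
		return nil, nil, err
	}
	ioctx, err := conn.OpenIOContext(name)
	if err != nil {
		conn.DeletePool(name)
		conn.Shutdown()
		return nil, nil, err
	}
	cleanup := func() {
		ioctx.Destroy()
		conn.DeletePool(name)
		conn.Shutdown()
	}
	return ioctx, cleanup, nil
}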
-func TestMonCommand(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - command, err := json.Marshal(map[string]string{"prefix": "df", "format": "json"}) - assert.NoError(t, err) - - buf, info, err := conn.MonCommand(command) - assert.NoError(t, err) - assert.Equal(t, info, "") - - var message map[string]interface{} - err = json.Unmarshal(buf, &message) - assert.NoError(t, err) - - conn.Shutdown() -} - -func TestObjectIterator(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - poolname := GetUUID() - err := conn.MakePool(poolname) - assert.NoError(t, err) - - ioctx, err := conn.OpenIOContext(poolname) - assert.NoError(t, err) - - objectList := []string{} - err = ioctx.ListObjects(func(oid string) { - objectList = append(objectList, oid) - }) - assert.NoError(t, err) - assert.True(t, len(objectList) == 0) - - createdList := []string{} - for i := 0; i < 200; i++ { - oid := GetUUID() - bytes_in := []byte("input data") - err = ioctx.Write(oid, bytes_in, 0) - assert.NoError(t, err) - createdList = append(createdList, oid) - } - assert.True(t, len(createdList) == 200) - - err = ioctx.ListObjects(func(oid string) { - objectList = append(objectList, oid) - }) - assert.NoError(t, err) - assert.Equal(t, len(objectList), len(createdList)) - - sort.Strings(objectList) - sort.Strings(createdList) - - assert.Equal(t, objectList, createdList) -} - -func TestNewConnWithUser(t *testing.T) { - _, err := rados.NewConnWithUser("admin") - assert.Equal(t, err, nil) -} - -func TestReadWriteXattr(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - // make pool - pool_name := GetUUID() - err := conn.MakePool(pool_name) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(pool_name) - assert.NoError(t, err) - - bytes_in := []byte("input data") - err = pool.Write("obj", bytes_in, 0) - assert.NoError(t, err) - - my_xattr_in := []byte("my_value") - err = pool.SetXattr("obj", "my_key", my_xattr_in) - assert.NoError(t, err) - - my_xattr_out := make([]byte, len(my_xattr_in)) - n_out, err := pool.GetXattr("obj", "my_key", my_xattr_out) - - assert.Equal(t, n_out, len(my_xattr_in)) - assert.Equal(t, my_xattr_in, my_xattr_out) - - pool.Destroy() -} - -func TestListXattrs(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - // make pool - pool_name := GetUUID() - err := conn.MakePool(pool_name) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(pool_name) - assert.NoError(t, err) - - bytes_in := []byte("input data") - err = pool.Write("obj", bytes_in, 0) - assert.NoError(t, err) - - input_xattrs := make(map[string][]byte) - for i := 0; i < 200; i++ { - name := fmt.Sprintf("key_%d", i) - data := []byte(GetUUID()) - err = pool.SetXattr("obj", name, data) - assert.NoError(t, err) - input_xattrs[name] = data - } - - output_xattrs := make(map[string][]byte) - output_xattrs, err = pool.ListXattrs("obj") - assert.NoError(t, err) - assert.Equal(t, len(input_xattrs), len(output_xattrs)) - assert.Equal(t, input_xattrs, output_xattrs) - - pool.Destroy() -} - -func TestRmXattr(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - pool_name := GetUUID() - err := conn.MakePool(pool_name) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(pool_name) - assert.NoError(t, err) - - bytes_in := []byte("input data") - err = pool.Write("obj", bytes_in, 0) - assert.NoError(t, err) - - key := "key1" - 
val := []byte("val1") - err = pool.SetXattr("obj", key, val) - assert.NoError(t, err) - - key = "key2" - val = []byte("val2") - err = pool.SetXattr("obj", key, val) - assert.NoError(t, err) - - xattr_list := make(map[string][]byte) - xattr_list, err = pool.ListXattrs("obj") - assert.NoError(t, err) - assert.Equal(t, len(xattr_list), 2) - - pool.RmXattr("obj", "key2") - xattr_list, err = pool.ListXattrs("obj") - assert.NoError(t, err) - assert.Equal(t, len(xattr_list), 1) - - found := false - for key, _ = range xattr_list { - if key == "key2" { - found = true - } - - } - - if found { - t.Error("Deleted pool still exists") - } - - pool.Destroy() -} - -func TestReadWriteOmap(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - pool_name := GetUUID() - err := conn.MakePool(pool_name) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(pool_name) - assert.NoError(t, err) - - // Set - orig := map[string][]byte{ - "key1": []byte("value1"), - "key2": []byte("value2"), - "prefixed-key3": []byte("value3"), - "empty": []byte(""), - } - - err = pool.SetOmap("obj", orig) - assert.NoError(t, err) - - // List - remaining := map[string][]byte{} - for k, v := range orig { - remaining[k] = v - } - - err = pool.ListOmapValues("obj", "", "", 4, func(key string, value []byte) { - assert.Equal(t, remaining[key], value) - delete(remaining, key) - }) - assert.NoError(t, err) - assert.Equal(t, 0, len(remaining)) - - // Get (with a fixed number of keys) - fetched, err := pool.GetOmapValues("obj", "", "", 4) - assert.NoError(t, err) - assert.Equal(t, orig, fetched) - - // Get All (with an iterator size bigger than the map size) - fetched, err = pool.GetAllOmapValues("obj", "", "", 100) - assert.NoError(t, err) - assert.Equal(t, orig, fetched) - - // Get All (with an iterator size smaller than the map size) - fetched, err = pool.GetAllOmapValues("obj", "", "", 1) - assert.NoError(t, err) - assert.Equal(t, orig, fetched) - - // Remove - err = pool.RmOmapKeys("obj", []string{"key1", "prefixed-key3"}) - assert.NoError(t, err) - - fetched, err = pool.GetOmapValues("obj", "", "", 4) - assert.NoError(t, err) - assert.Equal(t, map[string][]byte{ - "key2": []byte("value2"), - "empty": []byte(""), - }, fetched) - - // Clear - err = pool.CleanOmap("obj") - assert.NoError(t, err) - - fetched, err = pool.GetOmapValues("obj", "", "", 4) - assert.NoError(t, err) - assert.Equal(t, map[string][]byte{}, fetched) - - pool.Destroy() -} - -func TestReadFilterOmap(t *testing.T) { - conn, _ := rados.NewConn() - conn.ReadDefaultConfigFile() - conn.Connect() - - pool_name := GetUUID() - err := conn.MakePool(pool_name) - assert.NoError(t, err) - - pool, err := conn.OpenIOContext(pool_name) - assert.NoError(t, err) - - orig := map[string][]byte{ - "key1": []byte("value1"), - "prefixed-key3": []byte("value3"), - "key2": []byte("value2"), - } - - err = pool.SetOmap("obj", orig) - assert.NoError(t, err) - - // filter by prefix - fetched, err := pool.GetOmapValues("obj", "", "prefixed", 4) - assert.NoError(t, err) - assert.Equal(t, map[string][]byte{ - "prefixed-key3": []byte("value3"), - }, fetched) - - // "start_after" a key - fetched, err = pool.GetOmapValues("obj", "key1", "", 4) - assert.NoError(t, err) - assert.Equal(t, map[string][]byte{ - "prefixed-key3": []byte("value3"), - "key2": []byte("value2"), - }, fetched) - - // maxReturn - fetched, err = pool.GetOmapValues("obj", "", "key", 1) - assert.NoError(t, err) - assert.Equal(t, map[string][]byte{ - "key1": []byte("value1"), - }, 
fetched) - - pool.Destroy() -} - diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/LICENSE deleted file mode 100644 index 2815cc36..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
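// A sketch, not part of the vendored files: the stevvooe/resumable package
// whose files follow exposes hashes whose internal state can be snapshotted
// and restored, as its package documentation below describes. Assuming the
// import paths and the resumable.Hash interface documented below, a caller
// might checkpoint a long-running digest like this (error handling elided;
// the variable names are illustrative):

package main

import (
	"crypto"
	"fmt"

	"github.com/stevvooe/resumable"
	_ "github.com/stevvooe/resumable/sha256" // side effect: registers the resumable SHA-256
)

func main() {
	h := crypto.SHA256.New() // resolves to the resumable implementation

	h.Write([]byte("first half of a large upload"))

	// Snapshot the hash state, e.g. to persist next to a partial upload.
	var state []byte
	if r, ok := h.(resumable.Hash); ok {
		state, _ = r.State()
	}

	// Later, possibly in another process: restore the state and continue.
	h2 := crypto.SHA256.New()
	if r, ok := h2.(resumable.Hash); ok {
		r.Restore(state)
	}
	h2.Write([]byte("second half"))

	fmt.Printf("%x\n", h2.Sum(nil)) // digest of the full input
}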
-
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/README.md
deleted file mode 100644
index d2d3fb89..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# go-crypto
-A Subset of the Go `crypto` Package with a Resumable Hash Interface
-
-### Documentation
-
-GoDocs: http://godoc.org/github.com/stevvooe/resumable
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/resumable.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/resumable.go
deleted file mode 100644
index af4488f1..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/resumable.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Package resumable registers resumable versions of hash functions. Resumable
-// varieties of hash functions are available via the standard crypto package.
-// Support can be checked by type assertion against the resumable.Hash
-// interface.
-//
-// While one can use these sub-packages directly, it makes more sense to
-// register them using side-effect imports:
-//
-//	import _ "github.com/stevvooe/resumable/sha256"
-//
-// This will make the resumable hashes available to the application through
-// the standard crypto package. For example, if a new sha256 is required, one
-// should use the following:
-//
-//	h := crypto.SHA256.New()
-//
-// Such a feature allows one to control the inclusion of resumable hash
-// support in a single file. Applications that require the resumable hash
-// implementation can type switch to detect support, while other parts of the
-// application can be completely oblivious to the presence of the alternative
-// hash functions.
-//
-// Also note that the implementations available in this package are completely
-// untouched from their Go counterparts in the standard library. Only an extra
-// file is added to each package to implement the extra resumable hash
-// functions.
-package resumable
-
-import "hash"
-
-// Hash is the common interface implemented by all resumable hash functions.
-type Hash interface {
-	hash.Hash
-
-	// Len returns the number of bytes written to the Hash so far.
-	Len() int64
-
-	// State returns a snapshot of the state of the Hash.
-	State() ([]byte, error)
-
-	// Restore resets the Hash to the given state.
-	Restore(state []byte) error
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go
deleted file mode 100644
index 426d78ad..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/resume.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package sha256
-
-import (
-	"bytes"
-	"encoding/gob"
-
-	// import to ensure that our init function runs after the standard package
-	_ "crypto/sha256"
-)
-
-// Len returns the number of bytes which have been written to the digest.
-func (d *digest) Len() int64 {
-	return int64(d.len)
-}
-
-// State returns a snapshot of the state of the digest.
-func (d *digest) State() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - - // We encode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - d.h, d.x, d.nx, d.len, d.is224, - } - - for _, val := range vals { - if err := encoder.Encode(val); err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -// Restore resets the digest to the given state. -func (d *digest) Restore(state []byte) error { - decoder := gob.NewDecoder(bytes.NewReader(state)) - - // We decode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &d.is224, - } - - for _, val := range vals { - if err := decoder.Decode(val); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256.go deleted file mode 100644 index d84cebf2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined -// in FIPS 180-4. -package sha256 - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.SHA224, New224) - crypto.RegisterHash(crypto.SHA256, New) -} - -// The size of a SHA256 checksum in bytes. -const Size = 32 - -// The size of a SHA224 checksum in bytes. -const Size224 = 28 - -// The blocksize of SHA256 and SHA224 in bytes. -const BlockSize = 64 - -const ( - chunk = 64 - init0 = 0x6A09E667 - init1 = 0xBB67AE85 - init2 = 0x3C6EF372 - init3 = 0xA54FF53A - init4 = 0x510E527F - init5 = 0x9B05688C - init6 = 0x1F83D9AB - init7 = 0x5BE0CD19 - init0_224 = 0xC1059ED8 - init1_224 = 0x367CD507 - init2_224 = 0x3070DD17 - init3_224 = 0xF70E5939 - init4_224 = 0xFFC00B31 - init5_224 = 0x68581511 - init6_224 = 0x64F98FA7 - init7_224 = 0xBEFA4FA4 -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - h [8]uint32 - x [chunk]byte - nx int - len uint64 - is224 bool // mark if this digest is SHA-224 -} - -func (d *digest) Reset() { - if !d.is224 { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - } else { - d.h[0] = init0_224 - d.h[1] = init1_224 - d.h[2] = init2_224 - d.h[3] = init3_224 - d.h[4] = init4_224 - d.h[5] = init5_224 - d.h[6] = init6_224 - d.h[7] = init7_224 - } - d.nx = 0 - d.len = 0 -} - -// New returns a new hash.Hash computing the SHA256 checksum. -func New() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -// New224 returns a new hash.Hash computing the SHA224 checksum. 
-func New224() hash.Hash { - d := new(digest) - d.is224 = true - d.Reset() - return d -} - -func (d *digest) Size() int { - if !d.is224 { - return Size - } - return Size224 -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d := *d0 - hash := d.checkSum() - if d.is224 { - return append(in, hash[:Size224]...) - } - return append(in, hash[:]...) -} - -func (d *digest) checkSum() [Size]byte { - len := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte - tmp[0] = 0x80 - if len%64 < 56 { - d.Write(tmp[0 : 56-len%64]) - } else { - d.Write(tmp[0 : 64+56-len%64]) - } - - // Length in bits. - len <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(len >> (56 - 8*i)) - } - d.Write(tmp[0:8]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - h := d.h[:] - if d.is224 { - h = d.h[:7] - } - - var digest [Size]byte - for i, s := range h { - digest[i*4] = byte(s >> 24) - digest[i*4+1] = byte(s >> 16) - digest[i*4+2] = byte(s >> 8) - digest[i*4+3] = byte(s) - } - - return digest -} - -// Sum256 returns the SHA256 checksum of the data. -func Sum256(data []byte) [Size]byte { - var d digest - d.Reset() - d.Write(data) - return d.checkSum() -} - -// Sum224 returns the SHA224 checksum of the data. -func Sum224(data []byte) (sum224 [Size224]byte) { - var d digest - d.is224 = true - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum224[:], sum[:Size224]) - return -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256_test.go deleted file mode 100644 index 1d883d39..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// SHA256 hash algorithm. See FIPS 180-2. 
- -package sha256 - -import ( - "fmt" - "io" - "testing" -) - -type sha256Test struct { - out string - in string -} - -var golden = []sha256Test{ - {"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", ""}, - {"ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb", "a"}, - {"fb8e20fc2e4c3f248c60c39bd652f3c1347298bb977b8b4d5903b85055620603", "ab"}, - {"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc"}, - {"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", "abcd"}, - {"36bbe50ed96841d10443bcb670d6554f0a34b761be67ec9c4a8ad2c0c44ca42c", "abcde"}, - {"bef57ec7f53a6d40beb640a780a639c83bc29ac8a9816f1fc6c5c6dcd93c4721", "abcdef"}, - {"7d1a54127b222502f5b79b5fb0803061152a44f92b37e23c6527baf665d4da9a", "abcdefg"}, - {"9c56cc51b374c3ba189210d5b6d4bf57790d351c96c47c02190ecf1e430635ab", "abcdefgh"}, - {"19cc02f26df43cc571bc9ed7b0c4d29224a3ec229529221725ef76d021c8326f", "abcdefghi"}, - {"72399361da6a7754fec986dca5b7cbaf1c810a28ded4abaf56b2106d06cb78b0", "abcdefghij"}, - {"a144061c271f152da4d151034508fed1c138b8c976339de229c3bb6d4bbb4fce", "Discard medicine more than two years old."}, - {"6dae5caa713a10ad04b46028bf6dad68837c581616a1589a265a11288d4bb5c4", "He who has a shady past knows that nice guys finish last."}, - {"ae7a702a9509039ddbf29f0765e70d0001177914b86459284dab8b348c2dce3f", "I wouldn't marry him with a ten foot pole."}, - {"6748450b01c568586715291dfa3ee018da07d36bb7ea6f180c1af6270215c64f", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, - {"14b82014ad2b11f661b5ae6a99b75105c2ffac278cd071cd6c05832793635774", "The days of the digital watch are numbered. -Tom Stoppard"}, - {"7102cfd76e2e324889eece5d6c41921b1e142a4ac5a2692be78803097f6a48d8", "Nepal premier won't resign."}, - {"23b1018cd81db1d67983c5f7417c44da9deb582459e378d7a068552ea649dc9f", "For every action there is an equal and opposite government program."}, - {"8001f190dfb527261c4cfcab70c98e8097a7a1922129bc4096950e57c7999a5a", "His money is twice tainted: 'taint yours and 'taint mine."}, - {"8c87deb65505c3993eb24b7a150c4155e82eee6960cf0c3a8114ff736d69cad5", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, - {"bfb0a67a19cdec3646498b2e0f751bddc41bba4b7f30081b0b932aad214d16d7", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, - {"7f9a0b9bf56332e19f5a0ec1ad9c1425a153da1c624868fda44561d6b74daf36", "size: a.out: bad magic"}, - {"b13f81b8aad9e3666879af19886140904f7f429ef083286195982a7588858cfc", "The major problem is with sendmail. -Mark Horton"}, - {"b26c38d61519e894480c70c8374ea35aa0ad05b2ae3d6674eec5f52a69305ed4", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, - {"049d5e26d4f10222cd841a119e38bd8d2e0d1129728688449575d4ff42b842c1", "If the enemy is within range, then so are you."}, - {"0e116838e3cc1c1a14cd045397e29b4d087aa11b0853fc69ec82e90330d60949", "It's well we cannot hear the screams/That we create in others' dreams."}, - {"4f7d8eb5bcf11de2a56b971021a444aa4eafd6ecd0f307b5109e4e776cd0fe46", "You remind me of a TV show, but that's all right: I watch it anyway."}, - {"61c0cc4c4bd8406d5120b3fb4ebc31ce87667c162f29468b3c779675a85aebce", "C is as portable as Stonehedge!!"}, - {"1fb2eb3688093c4a3f80cd87a5547e2ce940a4f923243a79a2a1e242220693ac", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. 
Huxley"}, - {"395585ce30617b62c80b93e8208ce866d4edc811a177fdb4b82d3911d8696423", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, - {"4f9b189a13d030838269dce846b16a1ce9ce81fe63e65de2f636863336a98fe6", "How can you write a big system without C++? -Paul Glick"}, -} - -var golden224 = []sha256Test{ - {"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", ""}, - {"abd37534c7d9a2efb9465de931cd7055ffdb8879563ae98078d6d6d5", "a"}, - {"db3cda86d4429a1d39c148989566b38f7bda0156296bd364ba2f878b", "ab"}, - {"23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", "abc"}, - {"a76654d8e3550e9a2d67a0eeb6c67b220e5885eddd3fde135806e601", "abcd"}, - {"bdd03d560993e675516ba5a50638b6531ac2ac3d5847c61916cfced6", "abcde"}, - {"7043631cb415556a275a4ebecb802c74ee9f6153908e1792a90b6a98", "abcdef"}, - {"d1884e711701ad81abe0c77a3b0ea12e19ba9af64077286c72fc602d", "abcdefg"}, - {"17eb7d40f0356f8598e89eafad5f6c759b1f822975d9c9b737c8a517", "abcdefgh"}, - {"aeb35915346c584db820d2de7af3929ffafef9222a9bcb26516c7334", "abcdefghi"}, - {"d35e1e5af29ddb0d7e154357df4ad9842afee527c689ee547f753188", "abcdefghij"}, - {"19297f1cef7ddc8a7e947f5c5a341e10f7245045e425db67043988d7", "Discard medicine more than two years old."}, - {"0f10c2eb436251f777fbbd125e260d36aecf180411726c7c885f599a", "He who has a shady past knows that nice guys finish last."}, - {"4d1842104919f314cad8a3cd20b3cba7e8ed3e7abed62b57441358f6", "I wouldn't marry him with a ten foot pole."}, - {"a8ba85c6fe0c48fbffc72bbb2f03fcdbc87ae2dc7a56804d1590fb3b", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, - {"5543fbab26e67e8885b1a852d567d1cb8b9bfe42e0899584c50449a9", "The days of the digital watch are numbered. -Tom Stoppard"}, - {"65ca107390f5da9efa05d28e57b221657edc7e43a9a18fb15b053ddb", "Nepal premier won't resign."}, - {"84953962be366305a9cc9b5cd16ed019edc37ac96c0deb3e12cca116", "For every action there is an equal and opposite government program."}, - {"35a189ce987151dfd00b3577583cc6a74b9869eecf894459cb52038d", "His money is twice tainted: 'taint yours and 'taint mine."}, - {"2fc333713983edfd4ef2c0da6fb6d6415afb94987c91e4069eb063e6", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, - {"cbe32d38d577a1b355960a4bc3c659c2dc4670859a19777a875842c4", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, - {"a2dc118ce959e027576413a7b440c875cdc8d40df9141d6ef78a57e1", "size: a.out: bad magic"}, - {"d10787e24052bcff26dc484787a54ed819e4e4511c54890ee977bf81", "The major problem is with sendmail. -Mark Horton"}, - {"62efcf16ab8a893acdf2f348aaf06b63039ff1bf55508c830532c9fb", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, - {"3e9b7e4613c59f58665104c5fa86c272db5d3a2ff30df5bb194a5c99", "If the enemy is within range, then so are you."}, - {"5999c208b8bdf6d471bb7c359ac5b829e73a8211dff686143a4e7f18", "It's well we cannot hear the screams/That we create in others' dreams."}, - {"3b2d67ff54eabc4ef737b14edf87c64280ef582bcdf2a6d56908b405", "You remind me of a TV show, but that's all right: I watch it anyway."}, - {"d0733595d20e4d3d6b5c565a445814d1bbb2fd08b9a3b8ffb97930c6", "C is as portable as Stonehedge!!"}, - {"43fb8aeed8a833175c9295c1165415f98c866ef08a4922959d673507", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. 
Huxley"}, - {"ec18e66e93afc4fb1604bc2baedbfd20b44c43d76e65c0996d7851c6", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, - {"86ed2eaa9c75ba98396e5c9fb2f679ecf0ea2ed1e0ee9ceecb4a9332", "How can you write a big system without C++? -Paul Glick"}, -} - -func TestGolden(t *testing.T) { - for i := 0; i < len(golden); i++ { - g := golden[i] - s := fmt.Sprintf("%x", Sum256([]byte(g.in))) - if s != g.out { - t.Fatalf("Sum256 function: sha256(%s) = %s want %s", g.in, s, g.out) - } - c := New() - for j := 0; j < 3; j++ { - if j < 2 { - io.WriteString(c, g.in) - } else { - io.WriteString(c, g.in[0:len(g.in)/2]) - c.Sum(nil) - io.WriteString(c, g.in[len(g.in)/2:]) - } - s := fmt.Sprintf("%x", c.Sum(nil)) - if s != g.out { - t.Fatalf("sha256[%d](%s) = %s want %s", j, g.in, s, g.out) - } - c.Reset() - } - } - for i := 0; i < len(golden224); i++ { - g := golden224[i] - s := fmt.Sprintf("%x", Sum224([]byte(g.in))) - if s != g.out { - t.Fatalf("Sum224 function: sha224(%s) = %s want %s", g.in, s, g.out) - } - c := New224() - for j := 0; j < 3; j++ { - if j < 2 { - io.WriteString(c, g.in) - } else { - io.WriteString(c, g.in[0:len(g.in)/2]) - c.Sum(nil) - io.WriteString(c, g.in[len(g.in)/2:]) - } - s := fmt.Sprintf("%x", c.Sum(nil)) - if s != g.out { - t.Fatalf("sha224[%d](%s) = %s want %s", j, g.in, s, g.out) - } - c.Reset() - } - } -} - -func TestSize(t *testing.T) { - c := New() - if got := c.Size(); got != Size { - t.Errorf("Size = %d; want %d", got, Size) - } - c = New224() - if got := c.Size(); got != Size224 { - t.Errorf("New224.Size = %d; want %d", got, Size224) - } -} - -func TestBlockSize(t *testing.T) { - c := New() - if got := c.BlockSize(); got != BlockSize { - t.Errorf("BlockSize = %d want %d", got, BlockSize) - } -} - -var bench = New() -var buf = make([]byte, 8192) - -func benchmarkSize(b *testing.B, size int) { - b.SetBytes(int64(size)) - sum := make([]byte, bench.Size()) - for i := 0; i < b.N; i++ { - bench.Reset() - bench.Write(buf[:size]) - bench.Sum(sum[:0]) - } -} - -func BenchmarkHash8Bytes(b *testing.B) { - benchmarkSize(b, 8) -} - -func BenchmarkHash1K(b *testing.B) { - benchmarkSize(b, 1024) -} - -func BenchmarkHash8K(b *testing.B) { - benchmarkSize(b, 8192) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block.go deleted file mode 100644 index ca5efd15..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !386,!amd64 - -// SHA256 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. 
- -package sha256 - -var _K = []uint32{ - 0x428a2f98, - 0x71374491, - 0xb5c0fbcf, - 0xe9b5dba5, - 0x3956c25b, - 0x59f111f1, - 0x923f82a4, - 0xab1c5ed5, - 0xd807aa98, - 0x12835b01, - 0x243185be, - 0x550c7dc3, - 0x72be5d74, - 0x80deb1fe, - 0x9bdc06a7, - 0xc19bf174, - 0xe49b69c1, - 0xefbe4786, - 0x0fc19dc6, - 0x240ca1cc, - 0x2de92c6f, - 0x4a7484aa, - 0x5cb0a9dc, - 0x76f988da, - 0x983e5152, - 0xa831c66d, - 0xb00327c8, - 0xbf597fc7, - 0xc6e00bf3, - 0xd5a79147, - 0x06ca6351, - 0x14292967, - 0x27b70a85, - 0x2e1b2138, - 0x4d2c6dfc, - 0x53380d13, - 0x650a7354, - 0x766a0abb, - 0x81c2c92e, - 0x92722c85, - 0xa2bfe8a1, - 0xa81a664b, - 0xc24b8b70, - 0xc76c51a3, - 0xd192e819, - 0xd6990624, - 0xf40e3585, - 0x106aa070, - 0x19a4c116, - 0x1e376c08, - 0x2748774c, - 0x34b0bcb5, - 0x391c0cb3, - 0x4ed8aa4a, - 0x5b9cca4f, - 0x682e6ff3, - 0x748f82ee, - 0x78a5636f, - 0x84c87814, - 0x8cc70208, - 0x90befffa, - 0xa4506ceb, - 0xbef9a3f7, - 0xc67178f2, -} - -func block(dig *digest, p []byte) { - var w [64]uint32 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - // Can interlace the computation of w with the - // rounds below if needed for speed. - for i := 0; i < 16; i++ { - j := i * 4 - w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) - } - for i := 16; i < 64; i++ { - v1 := w[i-2] - t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) - v2 := w[i-15] - t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 64; i++ { - t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_386.s b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_386.s deleted file mode 100644 index 73ae2bf3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_386.s +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// SHA256 block routine. See sha256block.go for Go equivalent. 
-// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 63 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVL (index*4)(SI), AX; \ - BSWAPL AX; \ - MOVL AX, (index*4)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) -// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) -#define MSGSCHEDULE1(index) \ - MOVL ((index-2)*4)(BP), AX; \ - MOVL AX, CX; \ - RORL $17, AX; \ - MOVL CX, DX; \ - RORL $19, CX; \ - SHRL $10, DX; \ - MOVL ((index-15)*4)(BP), BX; \ - XORL CX, AX; \ - MOVL BX, CX; \ - XORL DX, AX; \ - RORL $7, BX; \ - MOVL CX, DX; \ - SHRL $3, DX; \ - RORL $18, CX; \ - ADDL ((index-7)*4)(BP), AX; \ - XORL CX, BX; \ - XORL DX, BX; \ - ADDL ((index-16)*4)(BP), BX; \ - ADDL BX, AX; \ - MOVL AX, ((index)*4)(BP) - -// Calculate T1 in AX - uses AX, BX, CX and DX registers. -// Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA256T1(const, e, f, g, h) \ - MOVL (h*4)(DI), BX; \ - ADDL AX, BX; \ - MOVL (e*4)(DI), AX; \ - ADDL $const, BX; \ - MOVL (e*4)(DI), CX; \ - RORL $6, AX; \ - MOVL (e*4)(DI), DX; \ - RORL $11, CX; \ - XORL CX, AX; \ - MOVL (e*4)(DI), CX; \ - RORL $25, DX; \ - ANDL (f*4)(DI), CX; \ - XORL AX, DX; \ - MOVL (e*4)(DI), AX; \ - NOTL AX; \ - ADDL DX, BX; \ - ANDL (g*4)(DI), AX; \ - XORL CX, AX; \ - ADDL BX, AX - -// Calculate T2 in BX - uses AX, BX, CX and DX registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA256T2(a, b, c) \ - MOVL (a*4)(DI), AX; \ - MOVL (c*4)(DI), BX; \ - RORL $2, AX; \ - MOVL (a*4)(DI), DX; \ - ANDL (b*4)(DI), BX; \ - RORL $13, DX; \ - MOVL (a*4)(DI), CX; \ - ANDL (c*4)(DI), CX; \ - XORL DX, AX; \ - XORL CX, BX; \ - MOVL (a*4)(DI), DX; \ - MOVL (b*4)(DI), CX; \ - RORL $22, DX; \ - ANDL (a*4)(DI), CX; \ - XORL CX, BX; \ - XORL DX, AX; \ - ADDL AX, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
-#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA256T1(const, e, f, g, h); \ - MOVL AX, 292(SP); \ - SHA256T2(a, b, c); \ - MOVL 292(SP), AX; \ - ADDL AX, BX; \ - ADDL AX, (d*4)(DI); \ - MOVL BX, (h*4)(DI) - -#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$296-12 - MOVL p_base+4(FP), SI - MOVL p_len+8(FP), DX - SHRL $6, DX - SHLL $6, DX - - LEAL (SI)(DX*1), DI - MOVL DI, 288(SP) - CMPL SI, DI - JEQ end - - LEAL 256(SP), DI // variables - - MOVL dig+0(FP), BP - MOVL (0*4)(BP), AX // a = H0 - MOVL AX, (0*4)(DI) - MOVL (1*4)(BP), BX // b = H1 - MOVL BX, (1*4)(DI) - MOVL (2*4)(BP), CX // c = H2 - MOVL CX, (2*4)(DI) - MOVL (3*4)(BP), DX // d = H3 - MOVL DX, (3*4)(DI) - MOVL (4*4)(BP), AX // e = H4 - MOVL AX, (4*4)(DI) - MOVL (5*4)(BP), BX // f = H5 - MOVL BX, (5*4)(DI) - MOVL (6*4)(BP), CX // g = H6 - MOVL CX, (6*4)(DI) - MOVL (7*4)(BP), DX // h = H7 - MOVL DX, (7*4)(DI) - -loop: - MOVL SP, BP // message schedule - - SHA256ROUND0(0, 0x428a2f98, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND0(1, 0x71374491, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND0(2, 0xb5c0fbcf, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND0(3, 0xe9b5dba5, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND0(4, 0x3956c25b, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND0(5, 0x59f111f1, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND0(6, 0x923f82a4, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND0(7, 0xab1c5ed5, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND0(8, 0xd807aa98, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND0(9, 0x12835b01, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND0(10, 0x243185be, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND0(11, 0x550c7dc3, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND0(12, 0x72be5d74, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND0(13, 0x80deb1fe, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND0(14, 0x9bdc06a7, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND0(15, 0xc19bf174, 1, 2, 3, 4, 5, 6, 7, 0) - - SHA256ROUND1(16, 0xe49b69c1, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(17, 0xefbe4786, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(18, 0x0fc19dc6, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(19, 0x240ca1cc, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(20, 0x2de92c6f, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(21, 0x4a7484aa, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(22, 0x5cb0a9dc, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(23, 0x76f988da, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(24, 0x983e5152, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(25, 0xa831c66d, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(26, 0xb00327c8, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(27, 0xbf597fc7, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(28, 0xc6e00bf3, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(29, 0xd5a79147, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(30, 0x06ca6351, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(31, 0x14292967, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(32, 0x27b70a85, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(33, 0x2e1b2138, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(34, 0x4d2c6dfc, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(35, 0x53380d13, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(36, 0x650a7354, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(37, 0x766a0abb, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(38, 0x81c2c92e, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(39, 0x92722c85, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(40, 0xa2bfe8a1, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(41, 0xa81a664b, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(42, 0xc24b8b70, 6, 7, 0, 1, 2, 3, 4, 5) - 
SHA256ROUND1(43, 0xc76c51a3, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(44, 0xd192e819, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(45, 0xd6990624, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(46, 0xf40e3585, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(47, 0x106aa070, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(48, 0x19a4c116, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(49, 0x1e376c08, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(50, 0x2748774c, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(51, 0x34b0bcb5, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(52, 0x391c0cb3, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(53, 0x4ed8aa4a, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(54, 0x5b9cca4f, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(55, 0x682e6ff3, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(56, 0x748f82ee, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(57, 0x78a5636f, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(58, 0x84c87814, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(59, 0x8cc70208, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(60, 0x90befffa, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(61, 0xa4506ceb, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(62, 0xbef9a3f7, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(63, 0xc67178f2, 1, 2, 3, 4, 5, 6, 7, 0) - - MOVL dig+0(FP), BP - MOVL (0*4)(BP), AX // H0 = a + H0 - ADDL (0*4)(DI), AX - MOVL AX, (0*4)(DI) - MOVL AX, (0*4)(BP) - MOVL (1*4)(BP), BX // H1 = b + H1 - ADDL (1*4)(DI), BX - MOVL BX, (1*4)(DI) - MOVL BX, (1*4)(BP) - MOVL (2*4)(BP), CX // H2 = c + H2 - ADDL (2*4)(DI), CX - MOVL CX, (2*4)(DI) - MOVL CX, (2*4)(BP) - MOVL (3*4)(BP), DX // H3 = d + H3 - ADDL (3*4)(DI), DX - MOVL DX, (3*4)(DI) - MOVL DX, (3*4)(BP) - MOVL (4*4)(BP), AX // H4 = e + H4 - ADDL (4*4)(DI), AX - MOVL AX, (4*4)(DI) - MOVL AX, (4*4)(BP) - MOVL (5*4)(BP), BX // H5 = f + H5 - ADDL (5*4)(DI), BX - MOVL BX, (5*4)(DI) - MOVL BX, (5*4)(BP) - MOVL (6*4)(BP), CX // H6 = g + H6 - ADDL (6*4)(DI), CX - MOVL CX, (6*4)(DI) - MOVL CX, (6*4)(BP) - MOVL (7*4)(BP), DX // H7 = h + H7 - ADDL (7*4)(DI), DX - MOVL DX, (7*4)(DI) - MOVL DX, (7*4)(BP) - - ADDL $64, SI - CMPL SI, 288(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_amd64.s b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_amd64.s deleted file mode 100644 index 868eaed4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_amd64.s +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// SHA256 block routine. See sha256block.go for Go equivalent. 
-// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 63 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVL (index*4)(SI), AX; \ - BSWAPL AX; \ - MOVL AX, (index*4)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) -// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) -#define MSGSCHEDULE1(index) \ - MOVL ((index-2)*4)(BP), AX; \ - MOVL AX, CX; \ - RORL $17, AX; \ - MOVL CX, DX; \ - RORL $19, CX; \ - SHRL $10, DX; \ - MOVL ((index-15)*4)(BP), BX; \ - XORL CX, AX; \ - MOVL BX, CX; \ - XORL DX, AX; \ - RORL $7, BX; \ - MOVL CX, DX; \ - SHRL $3, DX; \ - RORL $18, CX; \ - ADDL ((index-7)*4)(BP), AX; \ - XORL CX, BX; \ - XORL DX, BX; \ - ADDL ((index-16)*4)(BP), BX; \ - ADDL BX, AX; \ - MOVL AX, ((index)*4)(BP) - -// Calculate T1 in AX - uses AX, CX and DX registers. -// h is also used as an accumulator. Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA256T1(const, e, f, g, h) \ - ADDL AX, h; \ - MOVL e, AX; \ - ADDL $const, h; \ - MOVL e, CX; \ - RORL $6, AX; \ - MOVL e, DX; \ - RORL $11, CX; \ - XORL CX, AX; \ - MOVL e, CX; \ - RORL $25, DX; \ - ANDL f, CX; \ - XORL AX, DX; \ - MOVL e, AX; \ - NOTL AX; \ - ADDL DX, h; \ - ANDL g, AX; \ - XORL CX, AX; \ - ADDL h, AX - -// Calculate T2 in BX - uses BX, CX, DX and DI registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA256T2(a, b, c) \ - MOVL a, DI; \ - MOVL c, BX; \ - RORL $2, DI; \ - MOVL a, DX; \ - ANDL b, BX; \ - RORL $13, DX; \ - MOVL a, CX; \ - ANDL c, CX; \ - XORL DX, DI; \ - XORL CX, BX; \ - MOVL a, DX; \ - MOVL b, CX; \ - RORL $22, DX; \ - ANDL a, CX; \ - XORL CX, BX; \ - XORL DX, DI; \ - ADDL DI, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
-#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA256T1(const, e, f, g, h); \ - SHA256T2(a, b, c); \ - MOVL BX, h; \ - ADDL AX, d; \ - ADDL AX, h - -#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$264-32 - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $6, DX - SHLQ $6, DX - - LEAQ (SI)(DX*1), DI - MOVQ DI, 256(SP) - CMPQ SI, DI - JEQ end - - MOVQ dig+0(FP), BP - MOVL (0*4)(BP), R8 // a = H0 - MOVL (1*4)(BP), R9 // b = H1 - MOVL (2*4)(BP), R10 // c = H2 - MOVL (3*4)(BP), R11 // d = H3 - MOVL (4*4)(BP), R12 // e = H4 - MOVL (5*4)(BP), R13 // f = H5 - MOVL (6*4)(BP), R14 // g = H6 - MOVL (7*4)(BP), R15 // h = H7 - -loop: - MOVQ SP, BP // message schedule - - SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8) - - SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10) - 
SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8) - - MOVQ dig+0(FP), BP - ADDL (0*4)(BP), R8 // H0 = a + H0 - MOVL R8, (0*4)(BP) - ADDL (1*4)(BP), R9 // H1 = b + H1 - MOVL R9, (1*4)(BP) - ADDL (2*4)(BP), R10 // H2 = c + H2 - MOVL R10, (2*4)(BP) - ADDL (3*4)(BP), R11 // H3 = d + H3 - MOVL R11, (3*4)(BP) - ADDL (4*4)(BP), R12 // H4 = e + H4 - MOVL R12, (4*4)(BP) - ADDL (5*4)(BP), R13 // H5 = f + H5 - MOVL R13, (5*4)(BP) - ADDL (6*4)(BP), R14 // H6 = g + H6 - MOVL R14, (6*4)(BP) - ADDL (7*4)(BP), R15 // H7 = h + H7 - MOVL R15, (7*4)(BP) - - ADDQ $64, SI - CMPQ SI, 256(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_decl.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_decl.go deleted file mode 100644 index a50c9787..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256block_decl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build 386 amd64 - -package sha256 - -//go:noescape - -func block(dig *digest, p []byte) diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256resume_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256resume_test.go deleted file mode 100644 index 2ddbda43..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha256/sha256resume_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package sha256 - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/sha256" // To register the stdlib sha224 and sha256 algs. - "hash" - "io" - "testing" - - "github.com/stevvooe/resumable" -) - -func compareResumableHash(t *testing.T, newResumable func() hash.Hash, newStdlib func() hash.Hash) { - // Read 3 kilobytes of random data into a buffer. - buf := make([]byte, 3*1024) - if _, err := io.ReadFull(rand.Reader, buf); err != nil { - t.Fatalf("unable to load random data: %s", err) - } - - // Use two Hash objects to consume prefixes of the data. One will be - // snapshotted and resumed with each additional byte, then both will write - // that byte. The digests should be equal after each byte is digested. - resumableHasher := newResumable().(resumable.Hash) - stdlibHasher := newStdlib() - - // First, assert that the initial digests are the same. - if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) { - t.Fatalf("initial digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) - } - - multiWriter := io.MultiWriter(resumableHasher, stdlibHasher) - - for i := 1; i <= len(buf); i++ { - - // Write the next byte. - multiWriter.Write(buf[i-1 : i]) - - if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) { - t.Fatalf("digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) - } - - // Snapshot, reset, and restore the chunk hasher. - hashState, err := resumableHasher.State() - if err != nil { - t.Fatalf("unable to get state of hash function: %s", err) - } - resumableHasher.Reset() - if err := resumableHasher.Restore(hashState); err != nil { - t.Fatalf("unable to restore state of hash function: %s", err) - } - } -} - -func TestResumable(t *testing.T) { - compareResumableHash(t, New224, sha256.New224) - compareResumableHash(t, New, sha256.New) -} - -func TestResumableRegistered(t *testing.T) { - - for _, hf := range []crypto.Hash{crypto.SHA224, crypto.SHA256} { - // make sure that the hash gets the resumable version from the global - // registry in crypto library. - h := hf.New() - - if rh, ok := h.(resumable.Hash); !ok { - t.Fatalf("non-resumable hash function registered: %#v %#v", rh, hf) - } - - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go deleted file mode 100644 index 55b433e7..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/resume.go +++ /dev/null @@ -1,53 +0,0 @@ -package sha512 - -import ( - "bytes" - "encoding/gob" - - // import to ensure that our init function runs after the standard package - _ "crypto/sha512" -) - -// Len returns the number of bytes which have been written to the digest.
-func (d *digest) Len() int64 { - return int64(d.len) -} - -// State returns a snapshot of the state of the digest. -func (d *digest) State() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - - // We encode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - d.h, d.x, d.nx, d.len, d.is384, - } - - for _, val := range vals { - if err := encoder.Encode(val); err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -// Restore resets the digest to the given state. -func (d *digest) Restore(state []byte) error { - decoder := gob.NewDecoder(bytes.NewReader(state)) - - // We decode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &d.is384, - } - - for _, val := range vals { - if err := decoder.Decode(val); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512.go deleted file mode 100644 index bca7a91e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha512 implements the SHA384 and SHA512 hash algorithms as defined -// in FIPS 180-2. -package sha512 - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.SHA384, New384) - crypto.RegisterHash(crypto.SHA512, New) -} - -// The size of a SHA512 checksum in bytes. -const Size = 64 - -// The size of a SHA384 checksum in bytes. -const Size384 = 48 - -// The blocksize of SHA512 and SHA384 in bytes. -const BlockSize = 128 - -const ( - chunk = 128 - init0 = 0x6a09e667f3bcc908 - init1 = 0xbb67ae8584caa73b - init2 = 0x3c6ef372fe94f82b - init3 = 0xa54ff53a5f1d36f1 - init4 = 0x510e527fade682d1 - init5 = 0x9b05688c2b3e6c1f - init6 = 0x1f83d9abfb41bd6b - init7 = 0x5be0cd19137e2179 - init0_384 = 0xcbbb9d5dc1059ed8 - init1_384 = 0x629a292a367cd507 - init2_384 = 0x9159015a3070dd17 - init3_384 = 0x152fecd8f70e5939 - init4_384 = 0x67332667ffc00b31 - init5_384 = 0x8eb44a8768581511 - init6_384 = 0xdb0c2e0d64f98fa7 - init7_384 = 0x47b5481dbefa4fa4 -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - h [8]uint64 - x [chunk]byte - nx int - len uint64 - is384 bool // mark if this digest is SHA-384 -} - -func (d *digest) Reset() { - if !d.is384 { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - } else { - d.h[0] = init0_384 - d.h[1] = init1_384 - d.h[2] = init2_384 - d.h[3] = init3_384 - d.h[4] = init4_384 - d.h[5] = init5_384 - d.h[6] = init6_384 - d.h[7] = init7_384 - } - d.nx = 0 - d.len = 0 -} - -// New returns a new hash.Hash computing the SHA512 checksum. -func New() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -// New384 returns a new hash.Hash computing the SHA384 checksum. 
-func New384() hash.Hash { - d := new(digest) - d.is384 = true - d.Reset() - return d -} - -func (d *digest) Size() int { - if !d.is384 { - return Size - } - return Size384 -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d := new(digest) - *d = *d0 - hash := d.checkSum() - if d.is384 { - return append(in, hash[:Size384]...) - } - return append(in, hash[:]...) -} - -func (d *digest) checkSum() [Size]byte { - // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. - len := d.len - var tmp [128]byte - tmp[0] = 0x80 - if len%128 < 112 { - d.Write(tmp[0 : 112-len%128]) - } else { - d.Write(tmp[0 : 128+112-len%128]) - } - - // Length in bits. - len <<= 3 - for i := uint(0); i < 16; i++ { - tmp[i] = byte(len >> (120 - 8*i)) - } - d.Write(tmp[0:16]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - h := d.h[:] - if d.is384 { - h = d.h[:6] - } - - var digest [Size]byte - for i, s := range h { - digest[i*8] = byte(s >> 56) - digest[i*8+1] = byte(s >> 48) - digest[i*8+2] = byte(s >> 40) - digest[i*8+3] = byte(s >> 32) - digest[i*8+4] = byte(s >> 24) - digest[i*8+5] = byte(s >> 16) - digest[i*8+6] = byte(s >> 8) - digest[i*8+7] = byte(s) - } - - return digest -} - -// Sum512 returns the SHA512 checksum of the data. -func Sum512(data []byte) [Size]byte { - var d digest - d.Reset() - d.Write(data) - return d.checkSum() -} - -// Sum384 returns the SHA384 checksum of the data. -func Sum384(data []byte) (sum384 [Size384]byte) { - var d digest - d.is384 = true - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum384[:], sum[:Size384]) - return -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512_test.go deleted file mode 100644 index 541860f7..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// SHA512 hash algorithm. See FIPS 180-2. 
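As a quick check of the padding arithmetic in checkSum above: a message is extended with a 0x80 byte and zeros until its length is 112 mod 128, leaving exactly 16 bytes for the bit-length field, so every padded message becomes a whole number of 128-byte blocks. A standalone sketch (illustrative, not part of the vendored package); the test file continues below:

```go
package main

import "fmt"

// padLen mirrors the rule in checkSum above; the count includes the leading 0x80 byte.
func padLen(msgLen uint64) uint64 {
	if msgLen%128 < 112 {
		return 112 - msgLen%128
	}
	return 128 + 112 - msgLen%128
}

func main() {
	for _, n := range []uint64{0, 111, 112, 127, 128} {
		total := n + padLen(n) + 16          // message + padding + 16-byte length field
		fmt.Println(n, padLen(n), total%128) // total%128 prints 0 for every n
	}
}
```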
- -package sha512 - -import ( - "fmt" - "io" - "testing" -) - -type sha512Test struct { - out string - in string -} - -var golden = []sha512Test{ - {"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", ""}, - {"1f40fc92da241694750979ee6cf582f2d5d7d28e18335de05abc54d0560e0f5302860c652bf08d560252aa5e74210546f369fbbbce8c12cfc7957b2652fe9a75", "a"}, - {"2d408a0717ec188158278a796c689044361dc6fdde28d6f04973b80896e1823975cdbf12eb63f9e0591328ee235d80e9b5bf1aa6a44f4617ff3caf6400eb172d", "ab"}, - {"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f", "abc"}, - {"d8022f2060ad6efd297ab73dcc5355c9b214054b0d1776a136a669d26a7d3b14f73aa0d0ebff19ee333368f0164b6419a96da49e3e481753e7e96b716bdccb6f", "abcd"}, - {"878ae65a92e86cac011a570d4c30a7eaec442b85ce8eca0c2952b5e3cc0628c2e79d889ad4d5c7c626986d452dd86374b6ffaa7cd8b67665bef2289a5c70b0a1", "abcde"}, - {"e32ef19623e8ed9d267f657a81944b3d07adbb768518068e88435745564e8d4150a0a703be2a7d88b61e3d390c2bb97e2d4c311fdc69d6b1267f05f59aa920e7", "abcdef"}, - {"d716a4188569b68ab1b6dfac178e570114cdf0ea3a1cc0e31486c3e41241bc6a76424e8c37ab26f096fc85ef9886c8cb634187f4fddff645fb099f1ff54c6b8c", "abcdefg"}, - {"a3a8c81bc97c2560010d7389bc88aac974a104e0e2381220c6e084c4dccd1d2d17d4f86db31c2a851dc80e6681d74733c55dcd03dd96f6062cdda12a291ae6ce", "abcdefgh"}, - {"f22d51d25292ca1d0f68f69aedc7897019308cc9db46efb75a03dd494fc7f126c010e8ade6a00a0c1a5f1b75d81e0ed5a93ce98dc9b833db7839247b1d9c24fe", "abcdefghi"}, - {"ef6b97321f34b1fea2169a7db9e1960b471aa13302a988087357c520be957ca119c3ba68e6b4982c019ec89de3865ccf6a3cda1fe11e59f98d99f1502c8b9745", "abcdefghij"}, - {"2210d99af9c8bdecda1b4beff822136753d8342505ddce37f1314e2cdbb488c6016bdaa9bd2ffa513dd5de2e4b50f031393d8ab61f773b0e0130d7381e0f8a1d", "Discard medicine more than two years old."}, - {"a687a8985b4d8d0a24f115fe272255c6afaf3909225838546159c1ed685c211a203796ae8ecc4c81a5b6315919b3a64f10713da07e341fcdbb08541bf03066ce", "He who has a shady past knows that nice guys finish last."}, - {"8ddb0392e818b7d585ab22769a50df660d9f6d559cca3afc5691b8ca91b8451374e42bcdabd64589ed7c91d85f626596228a5c8572677eb98bc6b624befb7af8", "I wouldn't marry him with a ten foot pole."}, - {"26ed8f6ca7f8d44b6a8a54ae39640fa8ad5c673f70ee9ce074ba4ef0d483eea00bab2f61d8695d6b34df9c6c48ae36246362200ed820448bdc03a720366a87c6", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, - {"e5a14bf044be69615aade89afcf1ab0389d5fc302a884d403579d1386a2400c089b0dbb387ed0f463f9ee342f8244d5a38cfbc0e819da9529fbff78368c9a982", "The days of the digital watch are numbered. -Tom Stoppard"}, - {"420a1faa48919e14651bed45725abe0f7a58e0f099424c4e5a49194946e38b46c1f8034b18ef169b2e31050d1648e0b982386595f7df47da4b6fd18e55333015", "Nepal premier won't resign."}, - {"d926a863beadb20134db07683535c72007b0e695045876254f341ddcccde132a908c5af57baa6a6a9c63e6649bba0c213dc05fadcf9abccea09f23dcfb637fbe", "For every action there is an equal and opposite government program."}, - {"9a98dd9bb67d0da7bf83da5313dff4fd60a4bac0094f1b05633690ffa7f6d61de9a1d4f8617937d560833a9aaa9ccafe3fd24db418d0e728833545cadd3ad92d", "His money is twice tainted: 'taint yours and 'taint mine."}, - {"d7fde2d2351efade52f4211d3746a0780a26eec3df9b2ed575368a8a1c09ec452402293a8ea4eceb5a4f60064ea29b13cdd86918cd7a4faf366160b009804107", "There is no reason for any individual to have a computer in their home. 
-Ken Olsen, 1977"}, - {"b0f35ffa2697359c33a56f5c0cf715c7aeed96da9905ca2698acadb08fbc9e669bf566b6bd5d61a3e86dc22999bcc9f2224e33d1d4f32a228cf9d0349e2db518", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, - {"3d2e5f91778c9e66f7e061293aaa8a8fc742dd3b2e4f483772464b1144189b49273e610e5cccd7a81a19ca1fa70f16b10f1a100a4d8c1372336be8484c64b311", "size: a.out: bad magic"}, - {"b2f68ff58ac015efb1c94c908b0d8c2bf06f491e4de8e6302c49016f7f8a33eac3e959856c7fddbc464de618701338a4b46f76dbfaf9a1e5262b5f40639771c7", "The major problem is with sendmail. -Mark Horton"}, - {"d8c92db5fdf52cf8215e4df3b4909d29203ff4d00e9ad0b64a6a4e04dec5e74f62e7c35c7fb881bd5de95442123df8f57a489b0ae616bd326f84d10021121c57", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, - {"19a9f8dc0a233e464e8566ad3ca9b91e459a7b8c4780985b015776e1bf239a19bc233d0556343e2b0a9bc220900b4ebf4f8bdf89ff8efeaf79602d6849e6f72e", "If the enemy is within range, then so are you."}, - {"00b4c41f307bde87301cdc5b5ab1ae9a592e8ecbb2021dd7bc4b34e2ace60741cc362560bec566ba35178595a91932b8d5357e2c9cec92d393b0fa7831852476", "It's well we cannot hear the screams/That we create in others' dreams."}, - {"91eccc3d5375fd026e4d6787874b1dce201cecd8a27dbded5065728cb2d09c58a3d467bb1faf353bf7ba567e005245d5321b55bc344f7c07b91cb6f26c959be7", "You remind me of a TV show, but that's all right: I watch it anyway."}, - {"fabbbe22180f1f137cfdc9556d2570e775d1ae02a597ded43a72a40f9b485d500043b7be128fb9fcd982b83159a0d99aa855a9e7cc4240c00dc01a9bdf8218d7", "C is as portable as Stonehedge!!"}, - {"2ecdec235c1fa4fc2a154d8fba1dddb8a72a1ad73838b51d792331d143f8b96a9f6fcb0f34d7caa351fe6d88771c4f105040e0392f06e0621689d33b2f3ba92e", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, - {"7ad681f6f96f82f7abfa7ecc0334e8fa16d3dc1cdc45b60b7af43fe4075d2357c0c1d60e98350f1afb1f2fe7a4d7cd2ad55b88e458e06b73c40b437331f5dab4", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, - {"833f9248ab4a3b9e5131f745fda1ffd2dd435b30e965957e78291c7ab73605fd1912b0794e5c233ab0a12d205a39778d19b83515d6a47003f19cdee51d98c7e0", "How can you write a big system without C++? 
-Paul Glick"}, -} - -var golden384 = []sha512Test{ - {"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b", ""}, - {"54a59b9f22b0b80880d8427e548b7c23abd873486e1f035dce9cd697e85175033caa88e6d57bc35efae0b5afd3145f31", "a"}, - {"c7be03ba5bcaa384727076db0018e99248e1a6e8bd1b9ef58a9ec9dd4eeebb3f48b836201221175befa74ddc3d35afdd", "ab"}, - {"cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7", "abc"}, - {"1165b3406ff0b52a3d24721f785462ca2276c9f454a116c2b2ba20171a7905ea5a026682eb659c4d5f115c363aa3c79b", "abcd"}, - {"4c525cbeac729eaf4b4665815bc5db0c84fe6300068a727cf74e2813521565abc0ec57a37ee4d8be89d097c0d2ad52f0", "abcde"}, - {"c6a4c65b227e7387b9c3e839d44869c4cfca3ef583dea64117859b808c1e3d8ae689e1e314eeef52a6ffe22681aa11f5", "abcdef"}, - {"9f11fc131123f844c1226f429b6a0a6af0525d9f40f056c7fc16cdf1b06bda08e302554417a59fa7dcf6247421959d22", "abcdefg"}, - {"9000cd7cada59d1d2eb82912f7f24e5e69cc5517f68283b005fa27c285b61e05edf1ad1a8a9bded6fd29eb87d75ad806", "abcdefgh"}, - {"ef54915b60cf062b8dd0c29ae3cad69abe6310de63ac081f46ef019c5c90897caefd79b796cfa81139788a260ded52df", "abcdefghi"}, - {"a12070030a02d86b0ddacd0d3a5b598344513d0a051e7355053e556a0055489c1555399b03342845c4adde2dc44ff66c", "abcdefghij"}, - {"86f58ec2d74d1b7f8eb0c2ff0967316699639e8d4eb129de54bdf34c96cdbabe200d052149f2dd787f43571ba74670d4", "Discard medicine more than two years old."}, - {"ae4a2b639ca9bfa04b1855d5a05fe7f230994f790891c6979103e2605f660c4c1262a48142dcbeb57a1914ba5f7c3fa7", "He who has a shady past knows that nice guys finish last."}, - {"40ae213df6436eca952aa6841886fcdb82908ef1576a99c8f49bb9dd5023169f7c53035abdda0b54c302f4974e2105e7", "I wouldn't marry him with a ten foot pole."}, - {"e7cf8b873c9bc950f06259aa54309f349cefa72c00d597aebf903e6519a50011dfe355afff064a10701c705693848df9", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, - {"c3d4f0f4047181c7d39d34703365f7bf70207183caf2c2f6145f04da895ef69124d9cdeb635da636c3a474e61024e29b", "The days of the digital watch are numbered. -Tom Stoppard"}, - {"a097aab567e167d5cf93676ed73252a69f9687cb3179bb2d27c9878119e94bf7b7c4b58dc90582edfaf66e11388ed714", "Nepal premier won't resign."}, - {"5026ca45c41fc64712eb65065da92f6467541c78f8966d3fe2c8e3fb769a3ec14215f819654b47bd64f7f0eac17184f3", "For every action there is an equal and opposite government program."}, - {"ac1cc0f5ac8d5f5514a7b738ac322b7fb52a161b449c3672e9b6a6ad1a5e4b26b001cf3bad24c56598676ca17d4b445a", "His money is twice tainted: 'taint yours and 'taint mine."}, - {"722d10c5de371ec0c8c4b5247ac8a5f1d240d68c73f8da13d8b25f0166d6f309bf9561979a111a0049405771d201941a", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, - {"dc2d3ea18bfa10549c63bf2b75b39b5167a80c12aff0e05443168ea87ff149fb0eda5e0bd234eb5d48c7d02ffc5807f1", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, - {"1d67c969e2a945ae5346d2139760261504d4ba164c522443afe19ef3e29b152a4c52445489cfc9d7215e5a450e8e1e4e", "size: a.out: bad magic"}, - {"5ff8e075e465646e7b73ef36d812c6e9f7d60fa6ea0e533e5569b4f73cde53cdd2cc787f33540af57cca3fe467d32fe0", "The major problem is with sendmail. -Mark Horton"}, - {"5bd0a997a67c9ae1979a894eb0cde403dde003c9b6f2c03cf21925c42ff4e1176e6df1ca005381612ef18457b9b7ec3b", "Give me a rock, paper and scissors and I will move the world. 
CCFestoon"}, - {"1eee6da33e7e54fc5be52ae23b94b16ba4d2a947ae4505c6a3edfc7401151ea5205ac01b669b56f27d8ef7f175ed7762", "If the enemy is within range, then so are you."}, - {"76b06e9dea66bfbb1a96029426dc0dfd7830bd297eb447ff5358d94a87cd00c88b59df2493fef56ecbb5231073892ea9", "It's well we cannot hear the screams/That we create in others' dreams."}, - {"12acaf21452cff586143e3f5db0bfdf7802c057e1adf2a619031c4e1b0ccc4208cf6cef8fe722bbaa2fb46a30d9135d8", "You remind me of a TV show, but that's all right: I watch it anyway."}, - {"0fc23d7f4183efd186f0bc4fc5db867e026e2146b06cb3d52f4bdbd57d1740122caa853b41868b197b2ac759db39df88", "C is as portable as Stonehedge!!"}, - {"bc805578a7f85d34a86a32976e1c34fe65cf815186fbef76f46ef99cda10723f971f3f1464d488243f5e29db7488598d", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, - {"b23918399a12ebf4431559eec3813eaf7412e875fd7464f16d581e473330842d2e96c6be49a7ce3f9bb0b8bc0fcbe0fe", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, - {"1764b700eb1ead52a2fc33cc28975c2180f1b8faa5038d94cffa8d78154aab16e91dd787e7b0303948ebed62561542c8", "How can you write a big system without C++? -Paul Glick"}, -} - -func TestGolden(t *testing.T) { - for i := 0; i < len(golden); i++ { - g := golden[i] - s := fmt.Sprintf("%x", Sum512([]byte(g.in))) - if s != g.out { - t.Fatalf("Sum512 function: sha512(%s) = %s want %s", g.in, s, g.out) - } - c := New() - for j := 0; j < 3; j++ { - if j < 2 { - io.WriteString(c, g.in) - } else { - io.WriteString(c, g.in[0:len(g.in)/2]) - c.Sum(nil) - io.WriteString(c, g.in[len(g.in)/2:]) - } - s := fmt.Sprintf("%x", c.Sum(nil)) - if s != g.out { - t.Fatalf("sha512[%d](%s) = %s want %s", j, g.in, s, g.out) - } - c.Reset() - } - } - for i := 0; i < len(golden384); i++ { - g := golden384[i] - s := fmt.Sprintf("%x", Sum384([]byte(g.in))) - if s != g.out { - t.Fatalf("Sum384 function: sha384(%s) = %s want %s", g.in, s, g.out) - } - c := New384() - for j := 0; j < 3; j++ { - if j < 2 { - io.WriteString(c, g.in) - } else { - io.WriteString(c, g.in[0:len(g.in)/2]) - c.Sum(nil) - io.WriteString(c, g.in[len(g.in)/2:]) - } - s := fmt.Sprintf("%x", c.Sum(nil)) - if s != g.out { - t.Fatalf("sha384[%d](%s) = %s want %s", j, g.in, s, g.out) - } - c.Reset() - } - } -} - -func TestSize(t *testing.T) { - c := New() - if got := c.Size(); got != Size { - t.Errorf("Size = %d; want %d", got, Size) - } - c = New384() - if got := c.Size(); got != Size384 { - t.Errorf("New384.Size = %d; want %d", got, Size384) - } -} - -func TestBlockSize(t *testing.T) { - c := New() - if got := c.BlockSize(); got != BlockSize { - t.Errorf("BlockSize = %d; want %d", got, BlockSize) - } -} - -var bench = New() -var buf = make([]byte, 8192) - -func benchmarkSize(b *testing.B, size int) { - b.SetBytes(int64(size)) - sum := make([]byte, bench.Size()) - for i := 0; i < b.N; i++ { - bench.Reset() - bench.Write(buf[:size]) - bench.Sum(sum[:0]) - } -} - -func BenchmarkHash8Bytes(b *testing.B) { - benchmarkSize(b, 8) -} - -func BenchmarkHash1K(b *testing.B) { - benchmarkSize(b, 1024) -} - -func BenchmarkHash8K(b *testing.B) { - benchmarkSize(b, 8192) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block.go deleted file mode 100644 index 648ae8f7..00000000 --- 
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 - -// SHA512 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. - -package sha512 - -var _K = []uint64{ - 0x428a2f98d728ae22, - 0x7137449123ef65cd, - 0xb5c0fbcfec4d3b2f, - 0xe9b5dba58189dbbc, - 0x3956c25bf348b538, - 0x59f111f1b605d019, - 0x923f82a4af194f9b, - 0xab1c5ed5da6d8118, - 0xd807aa98a3030242, - 0x12835b0145706fbe, - 0x243185be4ee4b28c, - 0x550c7dc3d5ffb4e2, - 0x72be5d74f27b896f, - 0x80deb1fe3b1696b1, - 0x9bdc06a725c71235, - 0xc19bf174cf692694, - 0xe49b69c19ef14ad2, - 0xefbe4786384f25e3, - 0x0fc19dc68b8cd5b5, - 0x240ca1cc77ac9c65, - 0x2de92c6f592b0275, - 0x4a7484aa6ea6e483, - 0x5cb0a9dcbd41fbd4, - 0x76f988da831153b5, - 0x983e5152ee66dfab, - 0xa831c66d2db43210, - 0xb00327c898fb213f, - 0xbf597fc7beef0ee4, - 0xc6e00bf33da88fc2, - 0xd5a79147930aa725, - 0x06ca6351e003826f, - 0x142929670a0e6e70, - 0x27b70a8546d22ffc, - 0x2e1b21385c26c926, - 0x4d2c6dfc5ac42aed, - 0x53380d139d95b3df, - 0x650a73548baf63de, - 0x766a0abb3c77b2a8, - 0x81c2c92e47edaee6, - 0x92722c851482353b, - 0xa2bfe8a14cf10364, - 0xa81a664bbc423001, - 0xc24b8b70d0f89791, - 0xc76c51a30654be30, - 0xd192e819d6ef5218, - 0xd69906245565a910, - 0xf40e35855771202a, - 0x106aa07032bbd1b8, - 0x19a4c116b8d2d0c8, - 0x1e376c085141ab53, - 0x2748774cdf8eeb99, - 0x34b0bcb5e19b48a8, - 0x391c0cb3c5c95a63, - 0x4ed8aa4ae3418acb, - 0x5b9cca4f7763e373, - 0x682e6ff3d6b2b8a3, - 0x748f82ee5defb2fc, - 0x78a5636f43172f60, - 0x84c87814a1f0ab72, - 0x8cc702081a6439ec, - 0x90befffa23631e28, - 0xa4506cebde82bde9, - 0xbef9a3f7b2c67915, - 0xc67178f2e372532b, - 0xca273eceea26619c, - 0xd186b8c721c0c207, - 0xeada7dd6cde0eb1e, - 0xf57d4f7fee6ed178, - 0x06f067aa72176fba, - 0x0a637dc5a2c898a6, - 0x113f9804bef90dae, - 0x1b710b35131c471b, - 0x28db77f523047d84, - 0x32caab7b40c72493, - 0x3c9ebe0a15c9bebc, - 0x431d67c49c100d4c, - 0x4cc5d4becb3e42b6, - 0x597f299cfc657e2a, - 0x5fcb6fab3ad6faec, - 0x6c44198c4a475817, -} - -func block(dig *digest, p []byte) { - var w [80]uint64 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - for i := 0; i < 16; i++ { - j := i * 8 - w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 | - uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7]) - } - for i := 16; i < 80; i++ { - v1 := w[i-2] - t1 := (v1>>19 | v1<<(64-19)) ^ (v1>>61 | v1<<(64-61)) ^ (v1 >> 6) - v2 := w[i-15] - t2 := (v2>>1 | v2<<(64-1)) ^ (v2>>8 | v2<<(64-8)) ^ (v2 >> 7) - - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 80; i++ { - t1 := h + ((e>>14 | e<<(64-14)) ^ (e>>18 | e<<(64-18)) ^ (e>>41 | e<<(64-41))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>28 | a<<(64-28)) ^ (a>>34 | a<<(64-34)) ^ (a>>39 | a<<(64-39))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git 
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_amd64.s b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_amd64.s deleted file mode 100644 index 2e10233d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_amd64.s +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// SHA512 block routine. See sha512block.go for Go equivalent. -// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 79 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVQ (index*8)(SI), AX; \ - BSWAPQ AX; \ - MOVQ AX, (index*8)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 -// SIGMA0(x) = ROTR(1,x) XOR ROTR(8,x) XOR SHR(7,x) -// SIGMA1(x) = ROTR(19,x) XOR ROTR(61,x) XOR SHR(6,x) -#define MSGSCHEDULE1(index) \ - MOVQ ((index-2)*8)(BP), AX; \ - MOVQ AX, CX; \ - RORQ $19, AX; \ - MOVQ CX, DX; \ - RORQ $61, CX; \ - SHRQ $6, DX; \ - MOVQ ((index-15)*8)(BP), BX; \ - XORQ CX, AX; \ - MOVQ BX, CX; \ - XORQ DX, AX; \ - RORQ $1, BX; \ - MOVQ CX, DX; \ - SHRQ $7, DX; \ - RORQ $8, CX; \ - ADDQ ((index-7)*8)(BP), AX; \ - XORQ CX, BX; \ - XORQ DX, BX; \ - ADDQ ((index-16)*8)(BP), BX; \ - ADDQ BX, AX; \ - MOVQ AX, ((index)*8)(BP) - -// Calculate T1 in AX - uses AX, CX and DX registers. -// h is also used as an accumulator. Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(14,x) XOR ROTR(18,x) XOR ROTR(41,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA512T1(const, e, f, g, h) \ - MOVQ $const, DX; \ - ADDQ AX, h; \ - MOVQ e, AX; \ - ADDQ DX, h; \ - MOVQ e, CX; \ - RORQ $14, AX; \ - MOVQ e, DX; \ - RORQ $18, CX; \ - XORQ CX, AX; \ - MOVQ e, CX; \ - RORQ $41, DX; \ - ANDQ f, CX; \ - XORQ AX, DX; \ - MOVQ e, AX; \ - NOTQ AX; \ - ADDQ DX, h; \ - ANDQ g, AX; \ - XORQ CX, AX; \ - ADDQ h, AX - -// Calculate T2 in BX - uses BX, CX, DX and DI registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(28,x) XOR ROTR(34,x) XOR ROTR(39,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA512T2(a, b, c) \ - MOVQ a, DI; \ - MOVQ c, BX; \ - RORQ $28, DI; \ - MOVQ a, DX; \ - ANDQ b, BX; \ - RORQ $34, DX; \ - MOVQ a, CX; \ - ANDQ c, CX; \ - XORQ DX, DI; \ - XORQ CX, BX; \ - MOVQ a, DX; \ - MOVQ b, CX; \ - RORQ $39, DX; \ - ANDQ a, CX; \ - XORQ CX, BX; \ - XORQ DX, DI; \ - ADDQ DI, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
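As with the SHA-256 file above, the rotation constants in MSGSCHEDULE1 are easier to verify against FIPS 180-4 in plain Go. A sketch, assuming Go 1.9+ for math/bits (the SHA512ROUND macros and the unrolled rounds follow below):

```go
package main

import (
	"fmt"
	"math/bits"
)

// rotr is ROTR(n, x) on 64-bit words.
func rotr(x uint64, n int) uint64 { return bits.RotateLeft64(x, -n) }

// schedule computes Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16.
func schedule(w []uint64, t int) uint64 {
	s1 := rotr(w[t-2], 19) ^ rotr(w[t-2], 61) ^ (w[t-2] >> 6)  // SIGMA1
	s0 := rotr(w[t-15], 1) ^ rotr(w[t-15], 8) ^ (w[t-15] >> 7) // SIGMA0
	return s1 + w[t-7] + s0 + w[t-16]
}

func main() {
	// The single padded 128-byte block for "abc": message, 0x80, zeros, length 24.
	w := make([]uint64, 80)
	w[0], w[15] = 0x6162638000000000, 24
	for t := 16; t < 80; t++ {
		w[t] = schedule(w, t)
	}
	fmt.Printf("W16 = %016x\n", w[16])
}
```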
-#define SHA512ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA512T1(const, e, f, g, h); \ - SHA512T2(a, b, c); \ - MOVQ BX, h; \ - ADDQ AX, d; \ - ADDQ AX, h - -#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA512ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA512ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$648-32 - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $7, DX - SHLQ $7, DX - - LEAQ (SI)(DX*1), DI - MOVQ DI, 640(SP) - CMPQ SI, DI - JEQ end - - MOVQ dig+0(FP), BP - MOVQ (0*8)(BP), R8 // a = H0 - MOVQ (1*8)(BP), R9 // b = H1 - MOVQ (2*8)(BP), R10 // c = H2 - MOVQ (3*8)(BP), R11 // d = H3 - MOVQ (4*8)(BP), R12 // e = H4 - MOVQ (5*8)(BP), R13 // f = H5 - MOVQ (6*8)(BP), R14 // g = H6 - MOVQ (7*8)(BP), R15 // h = H7 - -loop: - MOVQ SP, BP // message schedule - - SHA512ROUND0(0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND0(1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND0(2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND0(3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND0(4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND0(5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND0(6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND0(7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND0(8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND0(9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8) - - SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14) - 
SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8) - - MOVQ 
dig+0(FP), BP - ADDQ (0*8)(BP), R8 // H0 = a + H0 - MOVQ R8, (0*8)(BP) - ADDQ (1*8)(BP), R9 // H1 = b + H1 - MOVQ R9, (1*8)(BP) - ADDQ (2*8)(BP), R10 // H2 = c + H2 - MOVQ R10, (2*8)(BP) - ADDQ (3*8)(BP), R11 // H3 = d + H3 - MOVQ R11, (3*8)(BP) - ADDQ (4*8)(BP), R12 // H4 = e + H4 - MOVQ R12, (4*8)(BP) - ADDQ (5*8)(BP), R13 // H5 = f + H5 - MOVQ R13, (5*8)(BP) - ADDQ (6*8)(BP), R14 // H6 = g + H6 - MOVQ R14, (6*8)(BP) - ADDQ (7*8)(BP), R15 // H7 = h + H7 - MOVQ R15, (7*8)(BP) - - ADDQ $128, SI - CMPQ SI, 640(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_decl.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_decl.go deleted file mode 100644 index bef99de2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512block_decl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64 - -package sha512 - -//go:noescape - -func block(dig *digest, p []byte) diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512resume_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512resume_test.go deleted file mode 100644 index 3066c2ae..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/stevvooe/resumable/sha512/sha512resume_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package sha512 - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/sha512" // To register the stdlib sha384 and sha512 algs. - "hash" - "io" - "testing" - - "github.com/stevvooe/resumable" -) - -func compareResumableHash(t *testing.T, newResumable func() hash.Hash, newStdlib func() hash.Hash) { - // Read 3 kilobytes of random data into a buffer. - buf := make([]byte, 3*1024) - if _, err := io.ReadFull(rand.Reader, buf); err != nil { - t.Fatalf("unable to load random data: %s", err) - } - - // Use two Hash objects to consume prefixes of the data. One will be - // snapshotted and resumed with each additional byte, then both will write - // that byte. The digests should be equal after each byte is digested. - resumableHasher := newResumable().(resumable.Hash) - stdlibHasher := newStdlib() - - // First, assert that the initial digests are the same. - if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) { - t.Fatalf("initial digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) - } - - multiWriter := io.MultiWriter(resumableHasher, stdlibHasher) - - for i := 1; i <= len(buf); i++ { - - // Write the next byte. - multiWriter.Write(buf[i-1 : i]) - - if !bytes.Equal(resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) { - t.Fatalf("digests do not match: got %x, expected %x", resumableHasher.Sum(nil), stdlibHasher.Sum(nil)) - } - - // Snapshot, reset, and restore the chunk hasher.
- hashState, err := resumableHasher.State() - if err != nil { - t.Fatalf("unable to get state of hash function: %s", err) - } - resumableHasher.Reset() - if err := resumableHasher.Restore(hashState); err != nil { - t.Fatalf("unable to restore state of hash function: %s", err) - } - } -} - -func TestResumable(t *testing.T) { - compareResumableHash(t, New384, sha512.New384) - compareResumableHash(t, New, sha512.New) -} - -func TestResumableRegistered(t *testing.T) { - - for _, hf := range []crypto.Hash{crypto.SHA384, crypto.SHA512} { - // make sure that the hash gets the resumable version from the global - // registry in crypto library. - h := hf.New() - - if rh, ok := h.(resumable.Hash); !ok { - t.Fatalf("non-resumable hash function registered: %#v %#v", rh, hf) - } - - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/.gitignore deleted file mode 100644 index 83c8f823..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -*.[68] -*.a -*.out -*.swp -_obj -_testmain.go -cmd/metrics-bench/metrics-bench -cmd/metrics-example/metrics-example -cmd/never-read/never-read diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/LICENSE deleted file mode 100644 index 363fa9ee..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright 2012 Richard Crowley. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation -are those of the authors and should not be interpreted as representing -official policies, either expressed or implied, of Richard Crowley.
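The yvasiyarov/go-metrics package removed here is a vendored fork of rcrowley/go-metrics; its README and sources follow. As a quick orientation to the registry/counter pattern they document, a minimal sketch against the rcrowley import path the README uses:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	c := metrics.NewCounter()
	r.Register("requests", c) // or metrics.GetOrRegisterCounter("requests", r)

	c.Inc(47)
	snap := c.Snapshot() // read-only copy, per counter.go below
	c.Inc(1)

	fmt.Println(snap.Count(), c.Count()) // 47 48
}
```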
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/README.md deleted file mode 100644 index e0091a4b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/README.md +++ /dev/null @@ -1,104 +0,0 @@ -go-metrics -========== - -Go port of Coda Hale's Metrics library: . - -Documentation: . - -Usage ------ - -Create and update metrics: - -```go -c := metrics.NewCounter() -metrics.Register("foo", c) -c.Inc(47) - -g := metrics.NewGauge() -metrics.Register("bar", g) -g.Update(47) - -s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) -h := metrics.NewHistogram(s) -metrics.Register("baz", h) -h.Update(47) - -m := metrics.NewMeter() -metrics.Register("quux", m) -m.Mark(47) - -t := metrics.NewTimer() -metrics.Register("bang", t) -t.Time(func() {}) -t.Update(47) -``` - -Periodically log every metric in human-readable form to standard error: - -```go -go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) -``` - -Periodically log every metric in slightly-more-parseable form to syslog: - -```go -w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") -go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) -``` - -Periodically emit every metric to Graphite: - -```go -addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") -go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) -``` - -Periodically emit every metric into InfluxDB: - -```go -import "github.com/rcrowley/go-metrics/influxdb" - -go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{ - Host: "127.0.0.1:8086", - Database: "metrics", - Username: "test", - Password: "test", -}) -``` - -Periodically upload every metric to Librato: - -```go -import "github.com/rcrowley/go-metrics/librato" - -go librato.Librato(metrics.DefaultRegistry, - 10e9, // interval - "example@example.com", // account owner email address - "token", // Librato API token - "hostname", // source - []float64{0.95}, // percentiles to send - time.Millisecond, // time unit -) -``` - -Periodically emit every metric to StatHat: - -```go -import "github.com/rcrowley/go-metrics/stathat" - -go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") -``` - -Installation ------------- - -```sh -go get github.com/rcrowley/go-metrics -``` - -StatHat support additionally requires their Go client: - -```sh -go get github.com/stathat/go -``` diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-bench/metrics-bench.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-bench/metrics-bench.go deleted file mode 100644 index dddaf4b1..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-bench/metrics-bench.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import ( - "fmt" - "github.com/rcrowley/go-metrics" - "time" -) - -func main() { - r := metrics.NewRegistry() - for i := 0; i < 10000; i++ { - r.Register(fmt.Sprintf("counter-%d", i), metrics.NewCounter()) - r.Register(fmt.Sprintf("gauge-%d", i), metrics.NewGauge()) - r.Register(fmt.Sprintf("gaugefloat64-%d", i), metrics.NewGaugeFloat64()) - r.Register(fmt.Sprintf("histogram-uniform-%d", i),
metrics.NewHistogram(metrics.NewUniformSample(1028))) - r.Register(fmt.Sprintf("histogram-exp-%d", i), metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))) - r.Register(fmt.Sprintf("meter-%d", i), metrics.NewMeter()) - } - time.Sleep(600e9) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-example/metrics-example.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-example/metrics-example.go deleted file mode 100644 index 66f42c04..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-example/metrics-example.go +++ /dev/null @@ -1,154 +0,0 @@ -package main - -import ( - "errors" - "github.com/rcrowley/go-metrics" - // "github.com/rcrowley/go-metrics/stathat" - "log" - "math/rand" - "os" - // "syslog" - "time" -) - -const fanout = 10 - -func main() { - - r := metrics.NewRegistry() - - c := metrics.NewCounter() - r.Register("foo", c) - for i := 0; i < fanout; i++ { - go func() { - for { - c.Dec(19) - time.Sleep(300e6) - } - }() - go func() { - for { - c.Inc(47) - time.Sleep(400e6) - } - }() - } - - g := metrics.NewGauge() - r.Register("bar", g) - for i := 0; i < fanout; i++ { - go func() { - for { - g.Update(19) - time.Sleep(300e6) - } - }() - go func() { - for { - g.Update(47) - time.Sleep(400e6) - } - }() - } - - gf := metrics.NewGaugeFloat64() - r.Register("barfloat64", gf) - for i := 0; i < fanout; i++ { - go func() { - for { - gf.Update(19.0) - time.Sleep(300e6) - } - }() - go func() { - for { - gf.Update(47.0) - time.Sleep(400e6) - } - }() - } - - hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) { - if 0 < rand.Intn(2) { - h.Healthy() - } else { - h.Unhealthy(errors.New("baz")) - } - }) - r.Register("baz", hc) - - s := metrics.NewExpDecaySample(1028, 0.015) - //s := metrics.NewUniformSample(1028) - h := metrics.NewHistogram(s) - r.Register("bang", h) - for i := 0; i < fanout; i++ { - go func() { - for { - h.Update(19) - time.Sleep(300e6) - } - }() - go func() { - for { - h.Update(47) - time.Sleep(400e6) - } - }() - } - - m := metrics.NewMeter() - r.Register("quux", m) - for i := 0; i < fanout; i++ { - go func() { - for { - m.Mark(19) - time.Sleep(300e6) - } - }() - go func() { - for { - m.Mark(47) - time.Sleep(400e6) - } - }() - } - - t := metrics.NewTimer() - r.Register("hooah", t) - for i := 0; i < fanout; i++ { - go func() { - for { - t.Time(func() { time.Sleep(300e6) }) - } - }() - go func() { - for { - t.Time(func() { time.Sleep(400e6) }) - } - }() - } - - metrics.RegisterDebugGCStats(r) - go metrics.CaptureDebugGCStats(r, 5e9) - - metrics.RegisterRuntimeMemStats(r) - go metrics.CaptureRuntimeMemStats(r, 5e9) - - metrics.Log(r, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) - - /* - w, err := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") - if nil != err { log.Fatalln(err) } - metrics.Syslog(r, 60e9, w) - */ - - /* - addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") - metrics.Graphite(r, 10e9, "metrics", addr) - */ - - /* - stathat.Stathat(r, 10e9, "example@example.com") - */ - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/never-read/never-read.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/never-read/never-read.go deleted file mode 100644 index dc175b77..00000000 ---
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/never-read/never-read.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "log" - "net" -) - -func main() { - addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") - l, err := net.ListenTCP("tcp", addr) - if nil != err { - log.Fatalln(err) - } - log.Println("listening", l.Addr()) - for { - c, err := l.AcceptTCP() - if nil != err { - log.Fatalln(err) - } - log.Println("accepted", c.RemoteAddr()) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter.go deleted file mode 100644 index bb7b039c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter.go +++ /dev/null @@ -1,112 +0,0 @@ -package metrics - -import "sync/atomic" - -// Counters hold an int64 value that can be incremented and decremented. -type Counter interface { - Clear() - Count() int64 - Dec(int64) - Inc(int64) - Snapshot() Counter -} - -// GetOrRegisterCounter returns an existing Counter or constructs and registers -// a new StandardCounter. -func GetOrRegisterCounter(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounter).(Counter) -} - -// NewCounter constructs a new StandardCounter. -func NewCounter() Counter { - if UseNilMetrics { - return NilCounter{} - } - return &StandardCounter{0} -} - -// NewRegisteredCounter constructs and registers a new StandardCounter. -func NewRegisteredCounter(name string, r Registry) Counter { - c := NewCounter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// CounterSnapshot is a read-only copy of another Counter. -type CounterSnapshot int64 - -// Clear panics. -func (CounterSnapshot) Clear() { - panic("Clear called on a CounterSnapshot") -} - -// Count returns the count at the time the snapshot was taken. -func (c CounterSnapshot) Count() int64 { return int64(c) } - -// Dec panics. -func (CounterSnapshot) Dec(int64) { - panic("Dec called on a CounterSnapshot") -} - -// Inc panics. -func (CounterSnapshot) Inc(int64) { - panic("Inc called on a CounterSnapshot") -} - -// Snapshot returns the snapshot. -func (c CounterSnapshot) Snapshot() Counter { return c } - -// NilCounter is a no-op Counter. -type NilCounter struct{} - -// Clear is a no-op. -func (NilCounter) Clear() {} - -// Count is a no-op. -func (NilCounter) Count() int64 { return 0 } - -// Dec is a no-op. -func (NilCounter) Dec(i int64) {} - -// Inc is a no-op. -func (NilCounter) Inc(i int64) {} - -// Snapshot is a no-op. -func (NilCounter) Snapshot() Counter { return NilCounter{} } - -// StandardCounter is the standard implementation of a Counter and uses the -// sync/atomic package to manage a single int64 value. -type StandardCounter struct { - count int64 -} - -// Clear sets the counter to zero. -func (c *StandardCounter) Clear() { - atomic.StoreInt64(&c.count, 0) -} - -// Count returns the current count. -func (c *StandardCounter) Count() int64 { - return atomic.LoadInt64(&c.count) -} - -// Dec decrements the counter by the given amount. -func (c *StandardCounter) Dec(i int64) { - atomic.AddInt64(&c.count, -i) -} - -// Inc increments the counter by the given amount. -func (c *StandardCounter) Inc(i int64) { - atomic.AddInt64(&c.count, i) -} - -// Snapshot returns a read-only copy of the counter. 
-func (c *StandardCounter) Snapshot() Counter { - return CounterSnapshot(c.Count()) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter_test.go deleted file mode 100644 index dfb03b4e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkCounter(b *testing.B) { - c := NewCounter() - b.ResetTimer() - for i := 0; i < b.N; i++ { - c.Inc(1) - } -} - -func TestCounterClear(t *testing.T) { - c := NewCounter() - c.Inc(1) - c.Clear() - if count := c.Count(); 0 != count { - t.Errorf("c.Count(): 0 != %v\n", count) - } -} - -func TestCounterDec1(t *testing.T) { - c := NewCounter() - c.Dec(1) - if count := c.Count(); -1 != count { - t.Errorf("c.Count(): -1 != %v\n", count) - } -} - -func TestCounterDec2(t *testing.T) { - c := NewCounter() - c.Dec(2) - if count := c.Count(); -2 != count { - t.Errorf("c.Count(): -2 != %v\n", count) - } -} - -func TestCounterInc1(t *testing.T) { - c := NewCounter() - c.Inc(1) - if count := c.Count(); 1 != count { - t.Errorf("c.Count(): 1 != %v\n", count) - } -} - -func TestCounterInc2(t *testing.T) { - c := NewCounter() - c.Inc(2) - if count := c.Count(); 2 != count { - t.Errorf("c.Count(): 2 != %v\n", count) - } -} - -func TestCounterSnapshot(t *testing.T) { - c := NewCounter() - c.Inc(1) - snapshot := c.Snapshot() - c.Inc(1) - if count := snapshot.Count(); 1 != count { - t.Errorf("c.Count(): 1 != %v\n", count) - } -} - -func TestCounterZero(t *testing.T) { - c := NewCounter() - if count := c.Count(); 0 != count { - t.Errorf("c.Count(): 0 != %v\n", count) - } -} - -func TestGetOrRegisterCounter(t *testing.T) { - r := NewRegistry() - NewRegisteredCounter("foo", r).Inc(47) - if c := GetOrRegisterCounter("foo", r); 47 != c.Count() { - t.Fatal(c) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug.go deleted file mode 100644 index 043ccefa..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package metrics - -import ( - "runtime/debug" - "time" -) - -var ( - debugMetrics struct { - GCStats struct { - LastGC Gauge - NumGC Gauge - Pause Histogram - //PauseQuantiles Histogram - PauseTotal Gauge - } - ReadGCStats Timer - } - gcStats debug.GCStats -) - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. -func CaptureDebugGCStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureDebugGCStatsOnce(r) - } -} - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. -// -// Be careful (but much less so) with this because debug.ReadGCStats calls -// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world -// operation, isn't something you want to be doing all the time. 
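The comment above is the whole usage contract for the GC-stats pair: RegisterDebugGCStats (defined just below) allocates and registers the debug.GCStats gauges, and only then may CaptureDebugGCStats be pointed at the same registry, typically from a background goroutine. A sketch of the intended wiring, reusing the Log exporter from further down in this package:

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// Order matters: capturing into a registry that was never passed
	// to RegisterDebugGCStats panics, per the comment above.
	metrics.RegisterDebugGCStats(r)
	go metrics.CaptureDebugGCStats(r, 5*time.Second)

	// Any exporter can now read debug.GCStats.* like any other metric;
	// Log blocks, printing every interval.
	metrics.Log(r, 30*time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
}
```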
-func CaptureDebugGCStatsOnce(r Registry) { - lastGC := gcStats.LastGC - t := time.Now() - debug.ReadGCStats(&gcStats) - debugMetrics.ReadGCStats.UpdateSince(t) - - debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) - debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) - if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { - debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) - } - //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) - debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) -} - -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. -func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() - - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) -} - -// Allocate an initial slice for gcStats.Pause to avoid allocations during -// normal operation. -func init() { - gcStats.Pause = make([]time.Duration, 11) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug_test.go deleted file mode 100644 index 07eb8678..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package metrics - -import ( - "runtime" - "runtime/debug" - "testing" - "time" -) - -func BenchmarkDebugGCStats(b *testing.B) { - r := NewRegistry() - RegisterDebugGCStats(r) - b.ResetTimer() - for i := 0; i < b.N; i++ { - CaptureDebugGCStatsOnce(r) - } -} - -func TestDebugGCStatsBlocking(t *testing.T) { - if g := runtime.GOMAXPROCS(0); g < 2 { - t.Skipf("skipping TestDebugGCMemStatsBlocking with GOMAXPROCS=%d\n", g) - return - } - ch := make(chan int) - go testDebugGCStatsBlocking(ch) - var gcStats debug.GCStats - t0 := time.Now() - debug.ReadGCStats(&gcStats) - t1 := time.Now() - t.Log("i++ during debug.ReadGCStats:", <-ch) - go testDebugGCStatsBlocking(ch) - d := t1.Sub(t0) - t.Log(d) - time.Sleep(d) - t.Log("i++ during time.Sleep:", <-ch) -} - -func testDebugGCStatsBlocking(ch chan int) { - i := 0 - for { - select { - case ch <- i: - return - default: - i++ - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma.go deleted file mode 100644 index 7c152a17..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma.go +++ /dev/null @@ -1,118 +0,0 @@ -package metrics - -import ( - "math" - "sync" - "sync/atomic" -) - -// EWMAs continuously calculate an exponentially-weighted moving 
average -// based on an outside source of clock ticks. -type EWMA interface { - Rate() float64 - Snapshot() EWMA - Tick() - Update(int64) -} - -// NewEWMA constructs a new EWMA with the given alpha. -func NewEWMA(alpha float64) EWMA { - if UseNilMetrics { - return NilEWMA{} - } - return &StandardEWMA{alpha: alpha} -} - -// NewEWMA1 constructs a new EWMA for a one-minute moving average. -func NewEWMA1() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/1)) -} - -// NewEWMA5 constructs a new EWMA for a five-minute moving average. -func NewEWMA5() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/5)) -} - -// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. -func NewEWMA15() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/15)) -} - -// EWMASnapshot is a read-only copy of another EWMA. -type EWMASnapshot float64 - -// Rate returns the rate of events per second at the time the snapshot was -// taken. -func (a EWMASnapshot) Rate() float64 { return float64(a) } - -// Snapshot returns the snapshot. -func (a EWMASnapshot) Snapshot() EWMA { return a } - -// Tick panics. -func (EWMASnapshot) Tick() { - panic("Tick called on an EWMASnapshot") -} - -// Update panics. -func (EWMASnapshot) Update(int64) { - panic("Update called on an EWMASnapshot") -} - -// NilEWMA is a no-op EWMA. -type NilEWMA struct{} - -// Rate is a no-op. -func (NilEWMA) Rate() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } - -// Tick is a no-op. -func (NilEWMA) Tick() {} - -// Update is a no-op. -func (NilEWMA) Update(n int64) {} - -// StandardEWMA is the standard implementation of an EWMA and tracks the number -// of uncounted events and processes them on each tick. It uses the -// sync/atomic package to manage uncounted events. -type StandardEWMA struct { - uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment - alpha float64 - rate float64 - init bool - mutex sync.Mutex -} - -// Rate returns the moving average rate of events per second. -func (a *StandardEWMA) Rate() float64 { - a.mutex.Lock() - defer a.mutex.Unlock() - return a.rate * float64(1e9) -} - -// Snapshot returns a read-only copy of the EWMA. -func (a *StandardEWMA) Snapshot() EWMA { - return EWMASnapshot(a.Rate()) -} - -// Tick ticks the clock to update the moving average. It assumes it is called -// every five seconds. -func (a *StandardEWMA) Tick() { - count := atomic.LoadInt64(&a.uncounted) - atomic.AddInt64(&a.uncounted, -count) - instantRate := float64(count) / float64(5e9) - a.mutex.Lock() - defer a.mutex.Unlock() - if a.init { - a.rate += a.alpha * (instantRate - a.rate) - } else { - a.init = true - a.rate = instantRate - } -} - -// Update adds n uncounted events. 
-func (a *StandardEWMA) Update(n int64) { - atomic.AddInt64(&a.uncounted, n) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma_test.go deleted file mode 100644 index 0430fbd2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkEWMA(b *testing.B) { - a := NewEWMA1() - b.ResetTimer() - for i := 0; i < b.N; i++ { - a.Update(1) - a.Tick() - } -} - -func TestEWMA1(t *testing.T) { - a := NewEWMA1() - a.Update(3) - a.Tick() - if rate := a.Rate(); 0.6 != rate { - t.Errorf("initial a.Rate(): 0.6 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.22072766470286553 != rate { - t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.08120116994196772 != rate { - t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.029872241020718428 != rate { - t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.01098938333324054 != rate { - t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.004042768199451294 != rate { - t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.0014872513059998212 != rate { - t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.0005471291793327122 != rate { - t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.00020127757674150815 != rate { - t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 7.404588245200814e-05 != rate { - t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 2.7239957857491083e-05 != rate { - t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 1.0021020474147462e-05 != rate { - t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 3.6865274119969525e-06 != rate { - t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 1.3561976441886433e-06 != rate { - t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 4.989172314621449e-07 != rate { - t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 1.8354139230109722e-07 != rate { - t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate) - } -} - -func TestEWMA5(t *testing.T) { - a := NewEWMA5() - a.Update(3) - a.Tick() - if rate := a.Rate(); 0.6 != rate { - t.Errorf("initial a.Rate(): 0.6 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.49123845184678905 != rate { - t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.4021920276213837 != rate { - t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.32928698165641596 != rate 
{ - t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.269597378470333 != rate { - t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.2207276647028654 != rate { - t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.18071652714732128 != rate { - t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.14795817836496392 != rate { - t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.12113791079679326 != rate { - t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.09917933293295193 != rate { - t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.08120116994196763 != rate { - t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.06648189501740036 != rate { - t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.05443077197364752 != rate { - t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.04456414692860035 != rate { - t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.03648603757513079 != rate { - t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.0298722410207183831020718428 != rate { - t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate) - } -} - -func TestEWMA15(t *testing.T) { - a := NewEWMA15() - a.Update(3) - a.Tick() - if rate := a.Rate(); 0.6 != rate { - t.Errorf("initial a.Rate(): 0.6 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.5613041910189706 != rate { - t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.5251039914257684 != rate { - t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.4912384518467888184678905 != rate { - t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.459557003018789 != rate { - t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.4299187863442732 != rate { - t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.4021920276213831 != rate { - t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.37625345116383313 != rate { - t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.3519877317060185 != rate { - t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.3292869816564153165641596 != rate { - t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.3080502714195546 != rate { - t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.2881831806538789 != rate { - t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate) - } - 
elapseMinute(a) - if rate := a.Rate(); 0.26959737847033216 != rate { - t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.2522102307052083 != rate { - t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.23594443252115815 != rate { - t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate) - } - elapseMinute(a) - if rate := a.Rate(); 0.2207276647028646247028654470286553 != rate { - t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate) - } -} - -func elapseMinute(a EWMA) { - for i := 0; i < 12; i++ { - a.Tick() - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge.go deleted file mode 100644 index 807638a3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge.go +++ /dev/null @@ -1,84 +0,0 @@ -package metrics - -import "sync/atomic" - -// Gauges hold an int64 value that can be set arbitrarily. -type Gauge interface { - Snapshot() Gauge - Update(int64) - Value() int64 -} - -// GetOrRegisterGauge returns an existing Gauge or constructs and registers a -// new StandardGauge. -func GetOrRegisterGauge(name string, r Registry) Gauge { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGauge).(Gauge) -} - -// NewGauge constructs a new StandardGauge. -func NewGauge() Gauge { - if UseNilMetrics { - return NilGauge{} - } - return &StandardGauge{0} -} - -// NewRegisteredGauge constructs and registers a new StandardGauge. -func NewRegisteredGauge(name string, r Registry) Gauge { - c := NewGauge() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeSnapshot is a read-only copy of another Gauge. -type GaugeSnapshot int64 - -// Snapshot returns the snapshot. -func (g GaugeSnapshot) Snapshot() Gauge { return g } - -// Update panics. -func (GaugeSnapshot) Update(int64) { - panic("Update called on a GaugeSnapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeSnapshot) Value() int64 { return int64(g) } - -// NilGauge is a no-op Gauge. -type NilGauge struct{} - -// Snapshot is a no-op. -func (NilGauge) Snapshot() Gauge { return NilGauge{} } - -// Update is a no-op. -func (NilGauge) Update(v int64) {} - -// Value is a no-op. -func (NilGauge) Value() int64 { return 0 } - -// StandardGauge is the standard implementation of a Gauge and uses the -// sync/atomic package to manage a single int64 value. -type StandardGauge struct { - value int64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGauge) Snapshot() Gauge { - return GaugeSnapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGauge) Update(v int64) { - atomic.StoreInt64(&g.value, v) -} - -// Value returns the gauge's current value. 
-func (g *StandardGauge) Value() int64 { - return atomic.LoadInt64(&g.value) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64.go deleted file mode 100644 index 47c3566c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64.go +++ /dev/null @@ -1,91 +0,0 @@ -package metrics - -import "sync" - -// GaugeFloat64s hold a float64 value that can be set arbitrarily. -type GaugeFloat64 interface { - Snapshot() GaugeFloat64 - Update(float64) - Value() float64 -} - -// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a -// new StandardGaugeFloat64. -func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) -} - -// NewGaugeFloat64 constructs a new StandardGaugeFloat64. -func NewGaugeFloat64() GaugeFloat64 { - if UseNilMetrics { - return NilGaugeFloat64{} - } - return &StandardGaugeFloat64{ - value: 0.0, - } -} - -// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. -func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { - c := NewGaugeFloat64() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. -type GaugeFloat64Snapshot float64 - -// Snapshot returns the snapshot. -func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } - -// Update panics. -func (GaugeFloat64Snapshot) Update(float64) { - panic("Update called on a GaugeFloat64Snapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } - -// NilGauge is a no-op Gauge. -type NilGaugeFloat64 struct{} - -// Snapshot is a no-op. -func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } - -// Update is a no-op. -func (NilGaugeFloat64) Update(v float64) {} - -// Value is a no-op. -func (NilGaugeFloat64) Value() float64 { return 0.0 } - -// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses -// sync.Mutex to manage a single float64 value. -type StandardGaugeFloat64 struct { - mutex sync.Mutex - value float64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { - return GaugeFloat64Snapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGaugeFloat64) Update(v float64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v -} - -// Value returns the gauge's current value. 
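The two gauge flavours have identical shapes but different synchronization: StandardGauge stores an int64 via sync/atomic, while StandardGaugeFloat64 falls back to a sync.Mutex, since sync/atomic offers no float64 operations. Either way the semantics are last-write-wins, with the same snapshot contract as counters. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	g := metrics.NewGauge()
	g.Update(47)           // last write wins; nothing accumulates
	fmt.Println(g.Value()) // 47

	gf := metrics.NewGaugeFloat64()
	gf.Update(0.15)
	fmt.Println(gf.Value()) // 0.15

	// Snapshots freeze the value for export, as with counters.
	snap := g.Snapshot()
	g.Update(0)
	fmt.Println(snap.Value()) // still 47
}
```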
-func (g *StandardGaugeFloat64) Value() float64 { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64_test.go deleted file mode 100644 index 5d0aae27..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkGuageFloat64(b *testing.B) { - g := NewGaugeFloat64() - b.ResetTimer() - for i := 0; i < b.N; i++ { - g.Update(float64(i)) - } -} - -func TestGaugeFloat64(t *testing.T) { - g := NewGaugeFloat64() - g.Update(float64(47.0)) - if v := g.Value(); float64(47.0) != v { - t.Errorf("g.Value(): 47.0 != %v\n", v) - } -} - -func TestGaugeFloat64Snapshot(t *testing.T) { - g := NewGaugeFloat64() - g.Update(float64(47.0)) - snapshot := g.Snapshot() - g.Update(float64(0)) - if v := snapshot.Value(); float64(47.0) != v { - t.Errorf("g.Value(): 47.0 != %v\n", v) - } -} - -func TestGetOrRegisterGaugeFloat64(t *testing.T) { - r := NewRegistry() - NewRegisteredGaugeFloat64("foo", r).Update(float64(47.0)) - t.Logf("registry: %v", r) - if g := GetOrRegisterGaugeFloat64("foo", r); float64(47.0) != g.Value() { - t.Fatal(g) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_test.go deleted file mode 100644 index 50849629..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkGuage(b *testing.B) { - g := NewGauge() - b.ResetTimer() - for i := 0; i < b.N; i++ { - g.Update(int64(i)) - } -} - -func TestGauge(t *testing.T) { - g := NewGauge() - g.Update(int64(47)) - if v := g.Value(); 47 != v { - t.Errorf("g.Value(): 47 != %v\n", v) - } -} - -func TestGaugeSnapshot(t *testing.T) { - g := NewGauge() - g.Update(int64(47)) - snapshot := g.Snapshot() - g.Update(int64(0)) - if v := snapshot.Value(); 47 != v { - t.Errorf("g.Value(): 47 != %v\n", v) - } -} - -func TestGetOrRegisterGauge(t *testing.T) { - r := NewRegistry() - NewRegisteredGauge("foo", r).Update(47) - if g := GetOrRegisterGauge("foo", r); 47 != g.Value() { - t.Fatal(g) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite.go deleted file mode 100644 index 643b3ec5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite.go +++ /dev/null @@ -1,104 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" -) - -// GraphiteConfig provides a container with configuration parameters for -// the Graphite exporter -type GraphiteConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from 
timers and histograms -} - -// Graphite is a blocking exporter function which reports metrics in r -// to a graphite server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// GraphiteWithConfig is a blocking exporter function just like Graphite, -// but it takes a GraphiteConfig instead. -func GraphiteWithConfig(c GraphiteConfig) { - for _ = range time.Tick(c.FlushInterval) { - if err := graphite(&c); nil != err { - log.Println(err) - } - } -} - -func graphite(c *GraphiteConfig) error { - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) - case Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) - case GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, int64(du)*t.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, int64(du)*t.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, du*t.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, du*t.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) - } - w.Flush() - }) - return nil -} diff --git 
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite_test.go deleted file mode 100644 index b49dc4bb..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package metrics - -import ( - "net" - "time" -) - -func ExampleGraphite() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr) -} - -func ExampleGraphiteWithConfig() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: DefaultRegistry, - FlushInterval: 1 * time.Second, - DurationUnit: time.Millisecond, - Percentiles: []float64{ 0.5, 0.75, 0.99, 0.999 }, - }) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/healthcheck.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/healthcheck.go deleted file mode 100644 index 445131ca..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/healthcheck.go +++ /dev/null @@ -1,61 +0,0 @@ -package metrics - -// Healthchecks hold an error value describing an arbitrary up/down status. -type Healthcheck interface { - Check() - Error() error - Healthy() - Unhealthy(error) -} - -// NewHealthcheck constructs a new Healthcheck which will use the given -// function to update its status. -func NewHealthcheck(f func(Healthcheck)) Healthcheck { - if UseNilMetrics { - return NilHealthcheck{} - } - return &StandardHealthcheck{nil, f} -} - -// NilHealthcheck is a no-op. -type NilHealthcheck struct{} - -// Check is a no-op. -func (NilHealthcheck) Check() {} - -// Error is a no-op. -func (NilHealthcheck) Error() error { return nil } - -// Healthy is a no-op. -func (NilHealthcheck) Healthy() {} - -// Unhealthy is a no-op. -func (NilHealthcheck) Unhealthy(error) {} - -// StandardHealthcheck is the standard implementation of a Healthcheck and -// stores the status and a function to call to update the status. -type StandardHealthcheck struct { - err error - f func(Healthcheck) -} - -// Check runs the healthcheck function to update the healthcheck's status. -func (h *StandardHealthcheck) Check() { - h.f(h) -} - -// Error returns the healthcheck's status, which will be nil if it is healthy. -func (h *StandardHealthcheck) Error() error { - return h.err -} - -// Healthy marks the healthcheck as healthy. -func (h *StandardHealthcheck) Healthy() { - h.err = nil -} - -// Unhealthy marks the healthcheck as unhealthy. The error is stored and -// may be retrieved by the Error method. -func (h *StandardHealthcheck) Unhealthy(err error) { - h.err = err -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram.go deleted file mode 100644 index 7f3ee70c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram.go +++ /dev/null @@ -1,192 +0,0 @@ -package metrics - -// Histograms calculate distribution statistics from a series of int64 values. 
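One bug to flag in the Example functions above: net.ResolveTCPAddr expects a network of tcp, tcp4, or tcp6, so the literal "net" makes it return an error, which both examples discard with the blank identifier, leaving addr nil. A corrected launch of the blocking exporter (assuming something is listening on :2003, such as a carbon-cache or the never-read helper earlier in this package):

```go
package main

import (
	"log"
	"net"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
	if err != nil {
		log.Fatalln(err)
	}

	metrics.GetOrRegisterCounter("requests", metrics.DefaultRegistry).Inc(42)

	// Graphite blocks on its flush ticker, so run it in a goroutine;
	// it emits one "prefix.name.field value timestamp" line per field.
	go metrics.Graphite(metrics.DefaultRegistry, 10*time.Second, "myapp", addr)

	select {} // keep the process alive for the demo
}
```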
-type Histogram interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Sample() Sample - Snapshot() Histogram - StdDev() float64 - Update(int64) - Variance() float64 -} - -// GetOrRegisterHistogram returns an existing Histogram or constructs and -// registers a new StandardHistogram. -func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) -} - -// NewHistogram constructs a new StandardHistogram from a Sample. -func NewHistogram(s Sample) Histogram { - if UseNilMetrics { - return NilHistogram{} - } - return &StandardHistogram{sample: s} -} - -// NewRegisteredHistogram constructs and registers a new StandardHistogram from -// a Sample. -func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { - c := NewHistogram(s) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// HistogramSnapshot is a read-only copy of another Histogram. -type HistogramSnapshot struct { - sample *SampleSnapshot -} - -// Clear panics. -func (*HistogramSnapshot) Clear() { - panic("Clear called on a HistogramSnapshot") -} - -// Count returns the number of samples recorded at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample at the time the snapshot -// was taken. -func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the sample -// at the time the snapshot was taken. -func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *HistogramSnapshot) Sample() Sample { return h.sample } - -// Snapshot returns the snapshot. -func (h *HistogramSnapshot) Snapshot() Histogram { return h } - -// StdDev returns the standard deviation of the values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } - -// Update panics. -func (*HistogramSnapshot) Update(int64) { - panic("Update called on a HistogramSnapshot") -} - -// Variance returns the variance of inputs at the time the snapshot was taken. -func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } - -// NilHistogram is a no-op Histogram. -type NilHistogram struct{} - -// Clear is a no-op. -func (NilHistogram) Clear() {} - -// Count is a no-op. -func (NilHistogram) Count() int64 { return 0 } - -// Max is a no-op. -func (NilHistogram) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilHistogram) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilHistogram) Min() int64 { return 0 } - -// Percentile is a no-op. 
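Healthcheck, defined a little further up, is the odd metric out: it stores an error instead of a number, and Check simply invokes the user-supplied probe, which reports back through Healthy or Unhealthy. A sketch in which the ping probe is hypothetical:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rcrowley/go-metrics"
)

// ping is a stand-in for a real probe (database, upstream service, ...).
func ping() error { return errors.New("connection refused") }

func main() {
	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
		if err := ping(); err != nil {
			h.Unhealthy(err)
		} else {
			h.Healthy()
		}
	})
	metrics.DefaultRegistry.Register("db", hc)

	hc.Check()              // runs the probe and stores the result
	fmt.Println(hc.Error()) // connection refused
}
```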
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilHistogram) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Sample is a no-op. -func (NilHistogram) Sample() Sample { return NilSample{} } - -// Snapshot is a no-op. -func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } - -// StdDev is a no-op. -func (NilHistogram) StdDev() float64 { return 0.0 } - -// Update is a no-op. -func (NilHistogram) Update(v int64) {} - -// Variance is a no-op. -func (NilHistogram) Variance() float64 { return 0.0 } - -// StandardHistogram is the standard implementation of a Histogram and uses a -// Sample to bound its memory use. -type StandardHistogram struct { - sample Sample -} - -// Clear clears the histogram and its sample. -func (h *StandardHistogram) Clear() { h.sample.Clear() } - -// Count returns the number of samples recorded since the histogram was last -// cleared. -func (h *StandardHistogram) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample. -func (h *StandardHistogram) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample. -func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample. -func (h *StandardHistogram) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of the values in the sample. -func (h *StandardHistogram) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (h *StandardHistogram) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *StandardHistogram) Sample() Sample { return h.sample } - -// Snapshot returns a read-only copy of the histogram. -func (h *StandardHistogram) Snapshot() Histogram { - return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} -} - -// StdDev returns the standard deviation of the values in the sample. -func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } - -// Update samples a new value. -func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } - -// Variance returns the variance of the values in the sample. 
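Since StandardHistogram delegates every statistic to its Sample, accuracy is bounded by the sample: a UniformSample keeps a fixed-size random subset of everything seen, while an ExpDecaySample biases toward recent values. With a sample large enough that nothing is evicted, the percentiles are exact, which is what the 10000-update test below checks:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// A 100000-slot uniform sample: large enough that none of the
	// 10000 updates below are evicted, so the statistics are exact.
	h := metrics.NewHistogram(metrics.NewUniformSample(100000))
	for i := 1; i <= 10000; i++ {
		h.Update(int64(i))
	}

	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
	fmt.Println(h.Min(), h.Max(), h.Mean()) // 1 10000 5000.5
	fmt.Println(ps)                         // [5000.5 7500.75 9900.99]
}
```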
-func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram_test.go deleted file mode 100644 index d7f4f017..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkHistogram(b *testing.B) { - h := NewHistogram(NewUniformSample(100)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Update(int64(i)) - } -} - -func TestGetOrRegisterHistogram(t *testing.T) { - r := NewRegistry() - s := NewUniformSample(100) - NewRegisteredHistogram("foo", r, s).Update(47) - if h := GetOrRegisterHistogram("foo", r, s); 1 != h.Count() { - t.Fatal(h) - } -} - -func TestHistogram10000(t *testing.T) { - h := NewHistogram(NewUniformSample(100000)) - for i := 1; i <= 10000; i++ { - h.Update(int64(i)) - } - testHistogram10000(t, h) -} - -func TestHistogramEmpty(t *testing.T) { - h := NewHistogram(NewUniformSample(100)) - if count := h.Count(); 0 != count { - t.Errorf("h.Count(): 0 != %v\n", count) - } - if min := h.Min(); 0 != min { - t.Errorf("h.Min(): 0 != %v\n", min) - } - if max := h.Max(); 0 != max { - t.Errorf("h.Max(): 0 != %v\n", max) - } - if mean := h.Mean(); 0.0 != mean { - t.Errorf("h.Mean(): 0.0 != %v\n", mean) - } - if stdDev := h.StdDev(); 0.0 != stdDev { - t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev) - } - ps := h.Percentiles([]float64{0.5, 0.75, 0.99}) - if 0.0 != ps[0] { - t.Errorf("median: 0.0 != %v\n", ps[0]) - } - if 0.0 != ps[1] { - t.Errorf("75th percentile: 0.0 != %v\n", ps[1]) - } - if 0.0 != ps[2] { - t.Errorf("99th percentile: 0.0 != %v\n", ps[2]) - } -} - -func TestHistogramSnapshot(t *testing.T) { - h := NewHistogram(NewUniformSample(100000)) - for i := 1; i <= 10000; i++ { - h.Update(int64(i)) - } - snapshot := h.Snapshot() - h.Update(0) - testHistogram10000(t, snapshot) -} - -func testHistogram10000(t *testing.T, h Histogram) { - if count := h.Count(); 10000 != count { - t.Errorf("h.Count(): 10000 != %v\n", count) - } - if min := h.Min(); 1 != min { - t.Errorf("h.Min(): 1 != %v\n", min) - } - if max := h.Max(); 10000 != max { - t.Errorf("h.Max(): 10000 != %v\n", max) - } - if mean := h.Mean(); 5000.5 != mean { - t.Errorf("h.Mean(): 5000.5 != %v\n", mean) - } - if stdDev := h.StdDev(); 2886.751331514372 != stdDev { - t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev) - } - ps := h.Percentiles([]float64{0.5, 0.75, 0.99}) - if 5000.5 != ps[0] { - t.Errorf("median: 5000.5 != %v\n", ps[0]) - } - if 7500.75 != ps[1] { - t.Errorf("75th percentile: 7500.75 != %v\n", ps[1]) - } - if 9900.99 != ps[2] { - t.Errorf("99th percentile: 9900.99 != %v\n", ps[2]) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/influxdb/influxdb.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/influxdb/influxdb.go deleted file mode 100644 index 0163c9b4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/influxdb/influxdb.go +++ /dev/null @@ -1,114 +0,0 @@ -package influxdb - -import ( - "fmt" - influxClient "github.com/influxdb/influxdb/client" - "github.com/rcrowley/go-metrics" - "log" - "time" -) - -type Config struct { - Host string - Database 
string - Username string - Password string -} - -func Influxdb(r metrics.Registry, d time.Duration, config *Config) { - client, err := influxClient.NewClient(&influxClient.ClientConfig{ - Host: config.Host, - Database: config.Database, - Username: config.Username, - Password: config.Password, - }) - if err != nil { - log.Println(err) - return - } - - for _ = range time.Tick(d) { - if err := send(r, client); err != nil { - log.Println(err) - } - } -} - -func send(r metrics.Registry, client *influxClient.Client) error { - series := []*influxClient.Series{} - - r.Each(func(name string, i interface{}) { - now := getCurrentTime() - switch metric := i.(type) { - case metrics.Counter: - series = append(series, &influxClient.Series{ - Name: fmt.Sprintf("%s.count", name), - Columns: []string{"time", "count"}, - Points: [][]interface{}{ - {now, metric.Count()}, - }, - }) - case metrics.Gauge: - series = append(series, &influxClient.Series{ - Name: fmt.Sprintf("%s.value", name), - Columns: []string{"time", "value"}, - Points: [][]interface{}{ - {now, metric.Value()}, - }, - }) - case metrics.GaugeFloat64: - series = append(series, &influxClient.Series{ - Name: fmt.Sprintf("%s.value", name), - Columns: []string{"time", "value"}, - Points: [][]interface{}{ - {now, metric.Value()}, - }, - }) - case metrics.Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - series = append(series, &influxClient.Series{ - Name: fmt.Sprintf("%s.histogram", name), - Columns: []string{"time", "count", "min", "max", "mean", "std-dev", - "50-percentile", "75-percentile", "95-percentile", - "99-percentile", "999-percentile"}, - Points: [][]interface{}{ - {now, h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(), - ps[0], ps[1], ps[2], ps[3], ps[4]}, - }, - }) - case metrics.Meter: - m := metric.Snapshot() - series = append(series, &influxClient.Series{ - Name: fmt.Sprintf("%s.meter", name), - Columns: []string{"count", "one-minute", - "five-minute", "fifteen-minute", "mean"}, - Points: [][]interface{}{ - {m.Count(), m.Rate1(), m.Rate5(), m.Rate15(), m.RateMean()}, - }, - }) - case metrics.Timer: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - series = append(series, &influxClient.Series{ - Name: fmt.Sprintf("%s.timer", name), - Columns: []string{"count", "min", "max", "mean", "std-dev", - "50-percentile", "75-percentile", "95-percentile", - "99-percentile", "999-percentile", "one-minute", "five-minute", "fifteen-minute", "mean-rate"}, - Points: [][]interface{}{ - {h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(), - ps[0], ps[1], ps[2], ps[3], ps[4], - h.Rate1(), h.Rate5(), h.Rate15(), h.RateMean()}, - }, - }) - } - }) - if err := client.WriteSeries(series); err != nil { - log.Println(err) - } - return nil -} - -func getCurrentTime() int64 { - return time.Now().UnixNano() / 1000000 -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json.go deleted file mode 100644 index 04a9c919..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json.go +++ /dev/null @@ -1,83 +0,0 @@ -package metrics - -import ( - "encoding/json" - "io" - "time" -) - -// MarshalJSON returns a byte slice containing a JSON representation of all -// the metrics in the Registry. 
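The InfluxDB reporter has the same blocking-loop shape as the Graphite one, but writes through the old pre-0.9 client API (Series with parallel Columns and Points). A wiring sketch, shown with the upstream rcrowley import paths the file itself uses (the vendored copy lives under the yvasiyarov fork); host and credentials are illustrative:

```go
package main

import (
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/rcrowley/go-metrics/influxdb"
)

func main() {
	metrics.GetOrRegisterCounter("requests", metrics.DefaultRegistry).Inc(1)

	// Influxdb blocks on its flush ticker; run it in a goroutine.
	go influxdb.Influxdb(metrics.DefaultRegistry, 10*time.Second, &influxdb.Config{
		Host:     "127.0.0.1:8086",
		Database: "mydb",
		Username: "metrics",
		Password: "secret",
	})

	select {} // keep the process alive for the demo
}
```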
-func (r StandardRegistry) MarshalJSON() ([]byte, error) { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Count() - case Gauge: - values["value"] = metric.Value() - case GaugeFloat64: - values["value"] = metric.Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return json.Marshal(data) -} - -// WriteJSON writes metrics from the given registry periodically to the -// specified io.Writer as JSON. -func WriteJSON(r Registry, d time.Duration, w io.Writer) { - for _ = range time.Tick(d) { - WriteJSONOnce(r, w) - } -} - -// WriteJSONOnce writes metrics from the given registry to the specified -// io.Writer as JSON. 
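Because StandardRegistry implements json.Marshaler, JSON export composes with the standard library: WriteJSONOnce, defined just below, is nothing more than json.NewEncoder(w).Encode(r), and the output is a two-level object mapping each metric name to a field map. A sketch matching the tests that follow:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	r.Register("counter", metrics.NewCounter())

	var b bytes.Buffer
	metrics.WriteJSONOnce(r, &b)
	fmt.Print(b.String()) // {"counter":{"count":0}}
}
```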
-func WriteJSONOnce(r Registry, w io.Writer) { - json.NewEncoder(w).Encode(r) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json_test.go deleted file mode 100644 index cf70051f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package metrics - -import ( - "bytes" - "encoding/json" - "testing" -) - -func TestRegistryMarshallJSON(t *testing.T) { - b := &bytes.Buffer{} - enc := json.NewEncoder(b) - r := NewRegistry() - r.Register("counter", NewCounter()) - enc.Encode(r) - if s := b.String(); "{\"counter\":{\"count\":0}}\n" != s { - t.Fatalf(s) - } -} - -func TestRegistryWriteJSONOnce(t *testing.T) { - r := NewRegistry() - r.Register("counter", NewCounter()) - b := &bytes.Buffer{} - WriteJSONOnce(r, b) - if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" { - t.Fail() - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/client.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/client.go deleted file mode 100644 index 8c0c850e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/client.go +++ /dev/null @@ -1,102 +0,0 @@ -package librato - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" -) - -const Operations = "operations" -const OperationsShort = "ops" - -type LibratoClient struct { - Email, Token string -} - -// property strings -const ( - // display attributes - Color = "color" - DisplayMax = "display_max" - DisplayMin = "display_min" - DisplayUnitsLong = "display_units_long" - DisplayUnitsShort = "display_units_short" - DisplayStacked = "display_stacked" - DisplayTransform = "display_transform" - // special gauge display attributes - SummarizeFunction = "summarize_function" - Aggregate = "aggregate" - - // metric keys - Name = "name" - Period = "period" - Description = "description" - DisplayName = "display_name" - Attributes = "attributes" - - // measurement keys - MeasureTime = "measure_time" - Source = "source" - Value = "value" - - // special gauge keys - Count = "count" - Sum = "sum" - Max = "max" - Min = "min" - SumSquares = "sum_squares" - - // batch keys - Counters = "counters" - Gauges = "gauges" - - MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics" -) - -type Measurement map[string]interface{} -type Metric map[string]interface{} - -type Batch struct { - Gauges []Measurement `json:"gauges,omitempty"` - Counters []Measurement `json:"counters,omitempty"` - MeasureTime int64 `json:"measure_time"` - Source string `json:"source"` -} - -func (self *LibratoClient) PostMetrics(batch Batch) (err error) { - var ( - js []byte - req *http.Request - resp *http.Response - ) - - if len(batch.Counters) == 0 && len(batch.Gauges) == 0 { - return nil - } - - if js, err = json.Marshal(batch); err != nil { - return - } - - if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil { - return - } - - req.Header.Set("Content-Type", "application/json") - req.SetBasicAuth(self.Email, self.Token) - - if resp, err = http.DefaultClient.Do(req); err != nil { - return - } - - if resp.StatusCode != http.StatusOK { - var body []byte - if body, err = ioutil.ReadAll(resp.Body); err != nil { - 
body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err)) - } - err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body)) - } - return -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/librato.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/librato.go deleted file mode 100644 index 8cc35453..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/librato.go +++ /dev/null @@ -1,244 +0,0 @@ -package librato - -import ( - "fmt" - "github.com/yvasiyarov/go-metrics" - "log" - "math" - "regexp" - "time" - - //"github.com/rcrowley/go-metrics" -) - -// a regexp for extracting the unit from time.Duration.String -var unitRegexp = regexp.MustCompile("[^\\d]+$") - -// a helper that turns a time.Duration into librato display attributes for timer metrics -func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) { - attrs = make(map[string]interface{}) - attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d)) - attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String()))) - return -} - -type Reporter struct { - Email, Token string - Source string - Interval time.Duration - Registry metrics.Registry - Percentiles []float64 // percentiles to report on histogram metrics - TimerAttributes map[string]interface{} // units in which timers will be displayed - MetricPrefix string -} - -func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter { - return &Reporter{ - Email: e, - Token: t, - Source: s, - Interval: d, - Registry: r, - Percentiles: p, - TimerAttributes: translateTimerAttributes(u), - } -} - -func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) { - NewReporter(r, d, e, t, s, p, u).Run() -} - -func (self *Reporter) Run() { - ticker := time.Tick(self.Interval) - metricsApi := &LibratoClient{self.Email, self.Token} - for now := range ticker { - - var metrics Batch - var err error - if metrics, err = self.BuildRequest(now, self.Registry); err != nil { - log.Printf("ERROR constructing librato request body %s", err) - } - - if err := metricsApi.PostMetrics(metrics); err != nil { - log.Printf("ERROR sending metrics to librato %s", err) - } - } -} - -// calculate sum of squares from data provided by metrics.Histogram -// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods -func sumSquares(s metrics.Sample) float64 { - count := float64(s.Count()) - sumSquared := math.Pow(count*s.Mean(), 2) - sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count - if math.IsNaN(sumSquares) { - return 0.0 - } - return sumSquares -} -func sumSquaresTimer(t metrics.Timer) float64 { - count := float64(t.Count()) - sumSquared := math.Pow(count*t.Mean(), 2) - sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count - if math.IsNaN(sumSquares) { - return 0.0 - } - return sumSquares -} - -func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) { - snapshot = Batch{ - MeasureTime: now.Unix(), - Source: self.Source, - } - snapshot.MeasureTime = now.Unix() - snapshot.Gauges = make([]Measurement, 0) - snapshot.Counters = make([]Measurement, 0) - histogramGaugeCount := 1 + len(self.Percentiles) - r.Each(func(name string, metric interface{}) { - - if 
self.MetricPrefix != "" { - name = self.MetricPrefix + name - } - measurement := Measurement{} - measurement[Period] = self.Interval.Seconds() - - switch m := metric.(type) { - case metrics.Counter: - if m.Count() > 0 { - measurement[Name] = fmt.Sprintf("%s.%s", name, "count") - measurement[Value] = float64(m.Count()) - measurement[Attributes] = map[string]interface{}{ - DisplayUnitsLong: Operations, - DisplayUnitsShort: OperationsShort, - DisplayMin: "0", - } - snapshot.Counters = append(snapshot.Counters, measurement) - } - case metrics.Gauge: - measurement[Name] = name - measurement[Value] = float64(m.Value()) - snapshot.Gauges = append(snapshot.Gauges, measurement) - case metrics.GaugeFloat64: - measurement[Name] = name - measurement[Value] = float64(m.Value()) - snapshot.Gauges = append(snapshot.Gauges, measurement) - case metrics.Histogram: - if m.Count() > 0 { - gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount) - s := m.Sample() - measurement[Name] = fmt.Sprintf("%s.%s", name, "hist") - measurement[Count] = uint64(s.Count()) - measurement[Sum] = s.Sum() - measurement[Max] = float64(s.Max()) - measurement[Min] = float64(s.Min()) - measurement[SumSquares] = sumSquares(s) - gauges[0] = measurement - for i, p := range self.Percentiles { - gauges[i+1] = Measurement{ - Name: fmt.Sprintf("%s.%.2f", measurement[Name], p), - Value: s.Percentile(p), - Period: measurement[Period], - } - } - snapshot.Gauges = append(snapshot.Gauges, gauges...) - } - case metrics.Meter: - measurement[Name] = name - measurement[Value] = float64(m.Count()) - snapshot.Counters = append(snapshot.Counters, measurement) - snapshot.Gauges = append(snapshot.Gauges, - Measurement{ - Name: fmt.Sprintf("%s.%s", name, "1min"), - Value: m.Rate1(), - Period: int64(self.Interval.Seconds()), - Attributes: map[string]interface{}{ - DisplayUnitsLong: Operations, - DisplayUnitsShort: OperationsShort, - DisplayMin: "0", - }, - }, - Measurement{ - Name: fmt.Sprintf("%s.%s", name, "5min"), - Value: m.Rate5(), - Period: int64(self.Interval.Seconds()), - Attributes: map[string]interface{}{ - DisplayUnitsLong: Operations, - DisplayUnitsShort: OperationsShort, - DisplayMin: "0", - }, - }, - Measurement{ - Name: fmt.Sprintf("%s.%s", name, "15min"), - Value: m.Rate15(), - Period: int64(self.Interval.Seconds()), - Attributes: map[string]interface{}{ - DisplayUnitsLong: Operations, - DisplayUnitsShort: OperationsShort, - DisplayMin: "0", - }, - }, - ) - case metrics.Timer: - measurement[Name] = name - measurement[Value] = float64(m.Count()) - snapshot.Counters = append(snapshot.Counters, measurement) - if m.Count() > 0 { - libratoName := fmt.Sprintf("%s.%s", name, "timer.mean") - gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount) - gauges[0] = Measurement{ - Name: libratoName, - Count: uint64(m.Count()), - Sum: m.Mean() * float64(m.Count()), - Max: float64(m.Max()), - Min: float64(m.Min()), - SumSquares: sumSquaresTimer(m), - Period: int64(self.Interval.Seconds()), - Attributes: self.TimerAttributes, - } - for i, p := range self.Percentiles { - gauges[i+1] = Measurement{ - Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100), - Value: m.Percentile(p), - Period: int64(self.Interval.Seconds()), - Attributes: self.TimerAttributes, - } - } - snapshot.Gauges = append(snapshot.Gauges, gauges...) 
- snapshot.Gauges = append(snapshot.Gauges, - Measurement{ - Name: fmt.Sprintf("%s.%s", name, "rate.1min"), - Value: m.Rate1(), - Period: int64(self.Interval.Seconds()), - Attributes: map[string]interface{}{ - DisplayUnitsLong: Operations, - DisplayUnitsShort: OperationsShort, - DisplayMin: "0", - }, - }, - Measurement{ - Name: fmt.Sprintf("%s.%s", name, "rate.5min"), - Value: m.Rate5(), - Period: int64(self.Interval.Seconds()), - Attributes: map[string]interface{}{ - DisplayUnitsLong: Operations, - DisplayUnitsShort: OperationsShort, - DisplayMin: "0", - }, - }, - Measurement{ - Name: fmt.Sprintf("%s.%s", name, "rate.15min"), - Value: m.Rate15(), - Period: int64(self.Interval.Seconds()), - Attributes: map[string]interface{}{ - DisplayUnitsLong: Operations, - DisplayUnitsShort: OperationsShort, - DisplayMin: "0", - }, - }, - ) - } - } - }) - return -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/log.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/log.go deleted file mode 100644 index 278a8a44..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/log.go +++ /dev/null @@ -1,70 +0,0 @@ -package metrics - -import ( - "log" - "time" -) - -// Output each metric in the given registry periodically using the given -// logger. -func Log(r Registry, d time.Duration, l *log.Logger) { - for _ = range time.Tick(d) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - l.Printf("counter %s\n", name) - l.Printf(" count: %9d\n", metric.Count()) - case Gauge: - l.Printf("gauge %s\n", name) - l.Printf(" value: %9d\n", metric.Value()) - case GaugeFloat64: - l.Printf("gauge %s\n", name) - l.Printf(" value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - l.Printf("healthcheck %s\n", name) - l.Printf(" error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("histogram %s\n", name) - l.Printf(" count: %9d\n", h.Count()) - l.Printf(" min: %9d\n", h.Min()) - l.Printf(" max: %9d\n", h.Max()) - l.Printf(" mean: %12.2f\n", h.Mean()) - l.Printf(" stddev: %12.2f\n", h.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - l.Printf("meter %s\n", name) - l.Printf(" count: %9d\n", m.Count()) - l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) - l.Printf(" mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("timer %s\n", name) - l.Printf(" count: %9d\n", t.Count()) - l.Printf(" min: %9d\n", t.Min()) - l.Printf(" max: %9d\n", t.Max()) - l.Printf(" mean: %12.2f\n", t.Mean()) - l.Printf(" stddev: %12.2f\n", t.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) - l.Printf(" mean rate: %12.2f\n", t.RateMean()) - } - }) - } -} diff --git 
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/memory.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/memory.md deleted file mode 100644 index 47454f54..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/memory.md +++ /dev/null @@ -1,285 +0,0 @@ -Memory usage -============ - -(Highly unscientific.) - -Command used to gather static memory usage: - -```sh -grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" -``` - -Program used to gather baseline memory usage: - -```go -package main - -import "time" - -func main() { - time.Sleep(600e9) -} -``` - -Baseline --------- - -``` -VmPeak: 42604 kB -VmSize: 42604 kB -VmLck: 0 kB -VmHWM: 1120 kB -VmRSS: 1120 kB -VmData: 35460 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 36 kB -VmSwap: 0 kB -``` - -Program used to gather metric memory usage (with other metrics being similar): - -```go -package main - -import ( - "fmt" - "metrics" - "time" -) - -func main() { - fmt.Sprintf("foo") - metrics.NewRegistry() - time.Sleep(600e9) -} -``` - -1000 counters registered ------------------------- - -``` -VmPeak: 44016 kB -VmSize: 44016 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.412 kB virtual, TODO 0.808 kB resident per counter.** - -100000 counters registered --------------------------- - -``` -VmPeak: 55024 kB -VmSize: 55024 kB -VmLck: 0 kB -VmHWM: 12440 kB -VmRSS: 12440 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**0.1242 kB virtual, 0.1132 kB resident per counter.** - -1000 gauges registered ----------------------- - -``` -VmPeak: 44012 kB -VmSize: 44012 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.408 kB virtual, 0.808 kB resident per counter.** - -100000 gauges registered ------------------------- - -``` -VmPeak: 55020 kB -VmSize: 55020 kB -VmLck: 0 kB -VmHWM: 12432 kB -VmRSS: 12432 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 60 kB -VmSwap: 0 kB -``` - -**0.12416 kB virtual, 0.11312 resident per gauge.** - -1000 histograms with a uniform sample size of 1028 --------------------------------------------------- - -``` -VmPeak: 72272 kB -VmSize: 72272 kB -VmLck: 0 kB -VmHWM: 16204 kB -VmRSS: 16204 kB -VmData: 65100 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 80 kB -VmSwap: 0 kB -``` - -**29.668 kB virtual, TODO 15.084 resident per histogram.** - -10000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 256912 kB -VmSize: 256912 kB -VmLck: 0 kB -VmHWM: 146204 kB -VmRSS: 146204 kB -VmData: 249740 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 448 kB -VmSwap: 0 kB -``` - -**21.4308 kB virtual, 14.5084 kB resident per histogram.** - -50000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 908112 kB -VmSize: 908112 kB -VmLck: 0 kB -VmHWM: 645832 kB -VmRSS: 645588 kB -VmData: 900940 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1716 kB -VmSwap: 1544 kB -``` - -**17.31016 kB virtual, 12.88936 kB resident per histogram.** - -1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
-------------------------------------------------------------------------------------- - -``` -VmPeak: 62480 kB -VmSize: 62480 kB -VmLck: 0 kB -VmHWM: 11572 kB -VmRSS: 11572 kB -VmData: 55308 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**19.876 kB virtual, 10.452 kB resident per histogram.** - -10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 153296 kB -VmSize: 153296 kB -VmLck: 0 kB -VmHWM: 101176 kB -VmRSS: 101176 kB -VmData: 146124 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 240 kB -VmSwap: 0 kB -``` - -**11.0692 kB virtual, 10.0056 kB resident per histogram.** - -50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 557264 kB -VmSize: 557264 kB -VmLck: 0 kB -VmHWM: 501056 kB -VmRSS: 501056 kB -VmData: 550092 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1032 kB -VmSwap: 0 kB -``` - -**10.2932 kB virtual, 9.99872 kB resident per histogram.** - -1000 meters ------------ - -``` -VmPeak: 74504 kB -VmSize: 74504 kB -VmLck: 0 kB -VmHWM: 24124 kB -VmRSS: 24124 kB -VmData: 67340 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 92 kB -VmSwap: 0 kB -``` - -**31.9 kB virtual, 23.004 kB resident per meter.** - -10000 meters ------------- - -``` -VmPeak: 278920 kB -VmSize: 278920 kB -VmLck: 0 kB -VmHWM: 227300 kB -VmRSS: 227300 kB -VmData: 271756 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 488 kB -VmSwap: 0 kB -``` - -**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter.go deleted file mode 100644 index 0389ab0b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter.go +++ /dev/null @@ -1,233 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Meters count events to produce exponentially-weighted moving average rates -// at one-, five-, and fifteen-minutes and a mean rate. -type Meter interface { - Count() int64 - Mark(int64) - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Meter -} - -// GetOrRegisterMeter returns an existing Meter or constructs and registers a -// new StandardMeter. -func GetOrRegisterMeter(name string, r Registry) Meter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewMeter).(Meter) -} - -// NewMeter constructs a new StandardMeter and launches a goroutine. -func NewMeter() Meter { - if UseNilMetrics { - return NilMeter{} - } - m := newStandardMeter() - arbiter.Lock() - defer arbiter.Unlock() - arbiter.meters = append(arbiter.meters, m) - if !arbiter.started { - arbiter.started = true - go arbiter.tick() - } - return m -} - -// NewMeter constructs and registers a new StandardMeter and launches a -// goroutine. -func NewRegisteredMeter(name string, r Registry) Meter { - c := NewMeter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// MeterSnapshot is a read-only copy of another Meter. 
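
The Meter surface above is small: construct, `Mark`, read rates. A sketch of typical use, relying only on names defined in this file; note that `NewMeter` registers the meter with the package-level tick arbiter:

```go
package main

import (
	"fmt"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	m := metrics.NewMeter() // joins the package-level tick arbiter
	m.Mark(100)             // record 100 events

	snap := m.Snapshot() // read-only copy; calling Mark on it panics
	fmt.Printf("count=%d 1m=%.2f mean=%.2f\n",
		snap.Count(), snap.Rate1(), snap.RateMean())
}
```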
-type MeterSnapshot struct { - count int64 - rate1, rate5, rate15, rateMean float64 -} - -// Count returns the count of events at the time the snapshot was taken. -func (m *MeterSnapshot) Count() int64 { return m.count } - -// Mark panics. -func (*MeterSnapshot) Mark(n int64) { - panic("Mark called on a MeterSnapshot") -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. -func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } - -// Snapshot returns the snapshot. -func (m *MeterSnapshot) Snapshot() Meter { return m } - -// NilMeter is a no-op Meter. -type NilMeter struct{} - -// Count is a no-op. -func (NilMeter) Count() int64 { return 0 } - -// Mark is a no-op. -func (NilMeter) Mark(n int64) {} - -// Rate1 is a no-op. -func (NilMeter) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilMeter) Rate5() float64 { return 0.0 } - -// Rate15is a no-op. -func (NilMeter) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilMeter) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilMeter) Snapshot() Meter { return NilMeter{} } - -// StandardMeter is the standard implementation of a Meter. -type StandardMeter struct { - lock sync.RWMutex - snapshot *MeterSnapshot - a1, a5, a15 EWMA - startTime time.Time -} - -func newStandardMeter() *StandardMeter { - return &StandardMeter{ - snapshot: &MeterSnapshot{}, - a1: NewEWMA1(), - a5: NewEWMA5(), - a15: NewEWMA15(), - startTime: time.Now(), - } -} - -// Count returns the number of events recorded. -func (m *StandardMeter) Count() int64 { - m.lock.RLock() - count := m.snapshot.count - m.lock.RUnlock() - return count -} - -// Mark records the occurance of n events. -func (m *StandardMeter) Mark(n int64) { - m.lock.Lock() - defer m.lock.Unlock() - m.snapshot.count += n - m.a1.Update(n) - m.a5.Update(n) - m.a15.Update(n) - m.updateSnapshot() -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (m *StandardMeter) Rate1() float64 { - m.lock.RLock() - rate1 := m.snapshot.rate1 - m.lock.RUnlock() - return rate1 -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (m *StandardMeter) Rate5() float64 { - m.lock.RLock() - rate5 := m.snapshot.rate5 - m.lock.RUnlock() - return rate5 -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (m *StandardMeter) Rate15() float64 { - m.lock.RLock() - rate15 := m.snapshot.rate15 - m.lock.RUnlock() - return rate15 -} - -// RateMean returns the meter's mean rate of events per second. -func (m *StandardMeter) RateMean() float64 { - m.lock.RLock() - rateMean := m.snapshot.rateMean - m.lock.RUnlock() - return rateMean -} - -// Snapshot returns a read-only copy of the meter. 
-func (m *StandardMeter) Snapshot() Meter { - m.lock.RLock() - snapshot := *m.snapshot - m.lock.RUnlock() - return &snapshot -} - -func (m *StandardMeter) updateSnapshot() { - // should run with write lock held on m.lock - snapshot := m.snapshot - snapshot.rate1 = m.a1.Rate() - snapshot.rate5 = m.a5.Rate() - snapshot.rate15 = m.a15.Rate() - snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() -} - -func (m *StandardMeter) tick() { - m.lock.Lock() - defer m.lock.Unlock() - m.a1.Tick() - m.a5.Tick() - m.a15.Tick() - m.updateSnapshot() -} - -type meterArbiter struct { - sync.RWMutex - started bool - meters []*StandardMeter - ticker *time.Ticker -} - -var arbiter = meterArbiter{ticker: time.NewTicker(5e9)} - -// Ticks meters on the scheduled interval -func (ma *meterArbiter) tick() { - for { - select { - case <-ma.ticker.C: - ma.tickMeters() - } - } -} - -func (ma *meterArbiter) tickMeters() { - ma.RLock() - defer ma.RUnlock() - for _, meter := range ma.meters { - meter.tick() - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter_test.go deleted file mode 100644 index 26ce1398..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package metrics - -import ( - "testing" - "time" -) - -func BenchmarkMeter(b *testing.B) { - m := NewMeter() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Mark(1) - } -} - -func TestGetOrRegisterMeter(t *testing.T) { - r := NewRegistry() - NewRegisteredMeter("foo", r).Mark(47) - if m := GetOrRegisterMeter("foo", r); 47 != m.Count() { - t.Fatal(m) - } -} - -func TestMeterDecay(t *testing.T) { - ma := meterArbiter{ - ticker: time.NewTicker(1), - } - m := newStandardMeter() - ma.meters = append(ma.meters, m) - go ma.tick() - m.Mark(1) - rateMean := m.RateMean() - time.Sleep(1) - if m.RateMean() >= rateMean { - t.Error("m.RateMean() didn't decrease") - } -} - -func TestMeterNonzero(t *testing.T) { - m := NewMeter() - m.Mark(3) - if count := m.Count(); 3 != count { - t.Errorf("m.Count(): 3 != %v\n", count) - } -} - -func TestMeterSnapshot(t *testing.T) { - m := NewMeter() - m.Mark(1) - if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() { - t.Fatal(snapshot) - } -} - -func TestMeterZero(t *testing.T) { - m := NewMeter() - if count := m.Count(); 0 != count { - t.Errorf("m.Count(): 0 != %v\n", count) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics.go deleted file mode 100644 index b97a49ed..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics.go +++ /dev/null @@ -1,13 +0,0 @@ -// Go port of Coda Hale's Metrics library -// -// -// -// Coda Hale's original work: -package metrics - -// UseNilMetrics is checked by the constructor functions for all of the -// standard metrics. If it is true, the metric returned is a stub. -// -// This global kill-switch helps quantify the observer effect and makes -// for less cluttered pprof profiles. 
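
The kill-switch this comment describes gates every constructor in the package. A sketch of the effect, using only names defined here:

```go
package main

import (
	"fmt"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	metrics.UseNilMetrics = true

	m := metrics.NewMeter() // returns NilMeter{} while the switch is on
	m.Mark(1000)            // no-op
	fmt.Println(m.Count())  // 0: nothing was recorded
}
```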
-var UseNilMetrics bool = false diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics_test.go deleted file mode 100644 index 083f9676..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package metrics - -import ( - "io/ioutil" - "log" - "sync" - "testing" -) - -const FANOUT = 128 - -// Stop the compiler from complaining during debugging. -var ( - _ = ioutil.Discard - _ = log.LstdFlags -) - -func BenchmarkMetrics(b *testing.B) { - r := NewRegistry() - c := NewRegisteredCounter("counter", r) - g := NewRegisteredGauge("gauge", r) - gf := NewRegisteredGaugeFloat64("gaugefloat64", r) - h := NewRegisteredHistogram("histogram", r, NewUniformSample(100)) - m := NewRegisteredMeter("meter", r) - t := NewRegisteredTimer("timer", r) - RegisterDebugGCStats(r) - RegisterRuntimeMemStats(r) - b.ResetTimer() - ch := make(chan bool) - - wgD := &sync.WaitGroup{} - /* - wgD.Add(1) - go func() { - defer wgD.Done() - //log.Println("go CaptureDebugGCStats") - for { - select { - case <-ch: - //log.Println("done CaptureDebugGCStats") - return - default: - CaptureDebugGCStatsOnce(r) - } - } - }() - //*/ - - wgR := &sync.WaitGroup{} - //* - wgR.Add(1) - go func() { - defer wgR.Done() - //log.Println("go CaptureRuntimeMemStats") - for { - select { - case <-ch: - //log.Println("done CaptureRuntimeMemStats") - return - default: - CaptureRuntimeMemStatsOnce(r) - } - } - }() - //*/ - - wgW := &sync.WaitGroup{} - /* - wgW.Add(1) - go func() { - defer wgW.Done() - //log.Println("go Write") - for { - select { - case <-ch: - //log.Println("done Write") - return - default: - WriteOnce(r, ioutil.Discard) - } - } - }() - //*/ - - wg := &sync.WaitGroup{} - wg.Add(FANOUT) - for i := 0; i < FANOUT; i++ { - go func(i int) { - defer wg.Done() - //log.Println("go", i) - for i := 0; i < b.N; i++ { - c.Inc(1) - g.Update(int64(i)) - gf.Update(float64(i)) - h.Update(int64(i)) - m.Mark(1) - t.Update(1) - } - //log.Println("done", i) - }(i) - } - wg.Wait() - close(ch) - wgD.Wait() - wgR.Wait() - wgW.Wait() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb.go deleted file mode 100644 index fbc292de..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb.go +++ /dev/null @@ -1,119 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "time" - "os" - "strings" -) - -var shortHostName string = "" - -// OpenTSDBConfig provides a container with configuration parameters for -// the OpenTSDB exporter -type OpenTSDBConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names -} - -// OpenTSDB is a blocking exporter function which reports metrics in r -// to a TSDB server located at addr, flushing them every d duration -// and prepending metric names with prefix. 
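
The exporter writes one plain-text line per statistic over a TCP connection (see the `Fprintf` calls in `openTSDB` below). A hypothetical helper, `tsdbLine`, mirroring that wire format:

```go
package main

import "fmt"

// tsdbLine mirrors the format openTSDB writes: one
// "put <prefix>.<name>.<stat> <unix-ts> <value> host=<short-hostname>"
// line per statistic. (Counters and gauges use integer values; means and
// percentiles are printed with two decimals, as here.)
func tsdbLine(prefix, name, stat string, ts int64, value float64, host string) string {
	return fmt.Sprintf("put %s.%s.%s %d %.2f host=%s", prefix, name, stat, ts, value, host)
}

func main() {
	fmt.Println(tsdbLine("app", "api.request", "mean", 1448000000, 12.34, "web1"))
	// put app.api.request.mean 1448000000 12.34 host=web1
}
```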
-func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - OpenTSDBWithConfig(OpenTSDBConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - }) -} - -// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, -// but it takes a OpenTSDBConfig instead. -func OpenTSDBWithConfig(c OpenTSDBConfig) { - for _ = range time.Tick(c.FlushInterval) { - if err := openTSDB(&c); nil != err { - log.Println(err) - } - } -} - -func getShortHostname() string { - if shortHostName == "" { - host, _ := os.Hostname() - if index := strings.Index(host, "."); index > 0 { - shortHostName = host[:index] - } else { - shortHostName = host - } - } - return shortHostName -} - -func openTSDB(c *OpenTSDBConfig) error { - shortHostname := getShortHostname() - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) - case Gauge: - fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case GaugeFloat64: - fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Min(), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Max(), 
shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, du*t.Mean(), shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, du*t.StdDev(), shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[0], shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[1], shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[2], shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[3], shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[4], shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb_test.go deleted file mode 100644 index 6173d61a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package metrics - -import ( - "net" - "time" -) - -func ExampleOpenTSDB() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go OpenTSDB(DefaultRegistry, 1*time.Second, "some.prefix", addr) -} - -func ExampleOpenTSDBWithConfig() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go OpenTSDBWithConfig(OpenTSDBConfig{ - Addr: addr, - Registry: DefaultRegistry, - FlushInterval: 1 * time.Second, - DurationUnit: time.Millisecond, - }) -} - diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry.go deleted file mode 100644 index 9ef498a2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry.go +++ /dev/null @@ -1,168 +0,0 @@ -package metrics - -import ( - "fmt" - "reflect" - "sync" -) - -// DuplicateMetric is the error returned by Registry.Register when a metric -// already exists. If you mean to Register that metric you must first -// Unregister the existing metric. -type DuplicateMetric string - -func (err DuplicateMetric) Error() string { - return fmt.Sprintf("duplicate metric: %s", string(err)) -} - -// A Registry holds references to a set of metrics by name and can iterate -// over them, calling callback functions provided by the user. -// -// This is an interface so as to encourage other structs to implement -// the Registry API as appropriate. -type Registry interface { - - // Call the given function for each registered metric. - Each(func(string, interface{})) - - // Get the metric by the given name or nil if none is registered. - Get(string) interface{} - - // Gets an existing metric or registers the given one. 
- // The interface can be the metric to register if not found in registry, - // or a function returning the metric for lazy instantiation. - GetOrRegister(string, interface{}) interface{} - - // Register the given metric under the given name. - Register(string, interface{}) error - - // Run all registered healthchecks. - RunHealthchecks() - - // Unregister the metric with the given name. - Unregister(string) -} - -// The standard implementation of a Registry is a mutex-protected map -// of names to metrics. -type StandardRegistry struct { - metrics map[string]interface{} - mutex sync.Mutex -} - -// Create a new registry. -func NewRegistry() Registry { - return &StandardRegistry{metrics: make(map[string]interface{})} -} - -// Call the given function for each registered metric. -func (r *StandardRegistry) Each(f func(string, interface{})) { - for name, i := range r.registered() { - f(name, i) - } -} - -// Get the metric by the given name or nil if none is registered. -func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.metrics[name] -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - if metric, ok := r.metrics[name]; ok { - return metric - } - if v := reflect.ValueOf(i); v.Kind() == reflect.Func { - i = v.Call(nil)[0].Interface() - } - r.register(name, i) - return i -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func (r *StandardRegistry) Register(name string, i interface{}) error { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.register(name, i) -} - -// Run all registered healthchecks. -func (r *StandardRegistry) RunHealthchecks() { - r.mutex.Lock() - defer r.mutex.Unlock() - for _, i := range r.metrics { - if h, ok := i.(Healthcheck); ok { - h.Check() - } - } -} - -// Unregister the metric with the given name. -func (r *StandardRegistry) Unregister(name string) { - r.mutex.Lock() - defer r.mutex.Unlock() - delete(r.metrics, name) -} - -func (r *StandardRegistry) register(name string, i interface{}) error { - if _, ok := r.metrics[name]; ok { - return DuplicateMetric(name) - } - switch i.(type) { - case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: - r.metrics[name] = i - } - return nil -} - -func (r *StandardRegistry) registered() map[string]interface{} { - metrics := make(map[string]interface{}, len(r.metrics)) - r.mutex.Lock() - defer r.mutex.Unlock() - for name, i := range r.metrics { - metrics[name] = i - } - return metrics -} - -var DefaultRegistry Registry = NewRegistry() - -// Call the given function for each registered metric. -func Each(f func(string, interface{})) { - DefaultRegistry.Each(f) -} - -// Get the metric by the given name or nil if none is registered. -func Get(name string) interface{} { - return DefaultRegistry.Get(name) -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -func GetOrRegister(name string, i interface{}) interface{} { - return DefaultRegistry.GetOrRegister(name, i) -} - -// Register the given metric under the given name. 
Returns a DuplicateMetric -// if a metric by the given name is already registered. -func Register(name string, i interface{}) error { - return DefaultRegistry.Register(name, i) -} - -// Run all registered healthchecks. -func RunHealthchecks() { - DefaultRegistry.RunHealthchecks() -} - -// Unregister the metric with the given name. -func Unregister(name string) { - DefaultRegistry.Unregister(name) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry_test.go deleted file mode 100644 index 9ba0a020..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkRegistry(b *testing.B) { - r := NewRegistry() - r.Register("foo", NewCounter()) - b.ResetTimer() - for i := 0; i < b.N; i++ { - r.Each(func(string, interface{}) {}) - } -} - -func TestRegistry(t *testing.T) { - r := NewRegistry() - r.Register("foo", NewCounter()) - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if "foo" != name { - t.Fatal(name) - } - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if 1 != i { - t.Fatal(i) - } - r.Unregister("foo") - i = 0 - r.Each(func(string, interface{}) { i++ }) - if 0 != i { - t.Fatal(i) - } -} - -func TestRegistryDuplicate(t *testing.T) { - r := NewRegistry() - if err := r.Register("foo", NewCounter()); nil != err { - t.Fatal(err) - } - if err := r.Register("foo", NewGauge()); nil == err { - t.Fatal(err) - } - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if 1 != i { - t.Fatal(i) - } -} - -func TestRegistryGet(t *testing.T) { - r := NewRegistry() - r.Register("foo", NewCounter()) - if count := r.Get("foo").(Counter).Count(); 0 != count { - t.Fatal(count) - } - r.Get("foo").(Counter).Inc(1) - if count := r.Get("foo").(Counter).Count(); 1 != count { - t.Fatal(count) - } -} - -func TestRegistryGetOrRegister(t *testing.T) { - r := NewRegistry() - - // First metric wins with GetOrRegister - _ = r.GetOrRegister("foo", NewCounter()) - m := r.GetOrRegister("foo", NewGauge()) - if _, ok := m.(Counter); !ok { - t.Fatal(m) - } - - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if name != "foo" { - t.Fatal(name) - } - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) { - r := NewRegistry() - - // First metric wins with GetOrRegister - _ = r.GetOrRegister("foo", NewCounter) - m := r.GetOrRegister("foo", NewGauge) - if _, ok := m.(Counter); !ok { - t.Fatal(m) - } - - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if name != "foo" { - t.Fatal(name) - } - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if i != 1 { - t.Fatal(i) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime.go deleted file mode 100644 index 82574bf2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime.go +++ /dev/null @@ -1,200 +0,0 @@ -package metrics - -import ( - "runtime" - "time" -) - -var ( - memStats 
runtime.MemStats - runtimeMetrics struct { - MemStats struct { - Alloc Gauge - BuckHashSys Gauge - DebugGC Gauge - EnableGC Gauge - Frees Gauge - HeapAlloc Gauge - HeapIdle Gauge - HeapInuse Gauge - HeapObjects Gauge - HeapReleased Gauge - HeapSys Gauge - LastGC Gauge - Lookups Gauge - Mallocs Gauge - MCacheInuse Gauge - MCacheSys Gauge - MSpanInuse Gauge - MSpanSys Gauge - NextGC Gauge - NumGC Gauge - PauseNs Histogram - PauseTotalNs Gauge - StackInuse Gauge - StackSys Gauge - Sys Gauge - TotalAlloc Gauge - } - NumCgoCall Gauge - NumGoroutine Gauge - ReadMemStats Timer - } - frees uint64 - lookups uint64 - mallocs uint64 - numGC uint32 - numCgoCalls int64 -) - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called as a goroutine. -func CaptureRuntimeMemStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureRuntimeMemStatsOnce(r) - } -} - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called in a background -// goroutine. Giving a registry which has not been given to -// RegisterRuntimeMemStats will panic. -// -// Be very careful with this because runtime.ReadMemStats calls the C -// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() -// and that last one does what it says on the tin. -func CaptureRuntimeMemStatsOnce(r Registry) { - t := time.Now() - runtime.ReadMemStats(&memStats) // This takes 50-200us. - runtimeMetrics.ReadMemStats.UpdateSince(t) - - runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) - runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) - if memStats.DebugGC { - runtimeMetrics.MemStats.DebugGC.Update(1) - } else { - runtimeMetrics.MemStats.DebugGC.Update(0) - } - if memStats.EnableGC { - runtimeMetrics.MemStats.EnableGC.Update(1) - } else { - runtimeMetrics.MemStats.EnableGC.Update(0) - } - - runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) - runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) - runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) - runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) - runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) - runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) - runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) - runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) - runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) - runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) - runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) - runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) - runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) - runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) - runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) - runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) - - // - i := numGC % uint32(len(memStats.PauseNs)) - ii := memStats.NumGC % uint32(len(memStats.PauseNs)) - if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { - for i = 0; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } else { - if i > ii { - for ; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - i = 0 - } - for ; i < ii; i++ { - 
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } - frees = memStats.Frees - lookups = memStats.Lookups - mallocs = memStats.Mallocs - numGC = memStats.NumGC - - runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) - runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) - runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) - runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) - runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) - - currentNumCgoCalls := numCgoCall() - runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) - numCgoCalls = currentNumCgoCalls - - runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) -} - -// Register runtimeMetrics for the Go runtime statistics exported in runtime and -// specifically runtime.MemStats. The runtimeMetrics are named by their -// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. -func RegisterRuntimeMemStats(r Registry) { - runtimeMetrics.MemStats.Alloc = NewGauge() - runtimeMetrics.MemStats.BuckHashSys = NewGauge() - runtimeMetrics.MemStats.DebugGC = NewGauge() - runtimeMetrics.MemStats.EnableGC = NewGauge() - runtimeMetrics.MemStats.Frees = NewGauge() - runtimeMetrics.MemStats.HeapAlloc = NewGauge() - runtimeMetrics.MemStats.HeapIdle = NewGauge() - runtimeMetrics.MemStats.HeapInuse = NewGauge() - runtimeMetrics.MemStats.HeapObjects = NewGauge() - runtimeMetrics.MemStats.HeapReleased = NewGauge() - runtimeMetrics.MemStats.HeapSys = NewGauge() - runtimeMetrics.MemStats.LastGC = NewGauge() - runtimeMetrics.MemStats.Lookups = NewGauge() - runtimeMetrics.MemStats.Mallocs = NewGauge() - runtimeMetrics.MemStats.MCacheInuse = NewGauge() - runtimeMetrics.MemStats.MCacheSys = NewGauge() - runtimeMetrics.MemStats.MSpanInuse = NewGauge() - runtimeMetrics.MemStats.MSpanSys = NewGauge() - runtimeMetrics.MemStats.NextGC = NewGauge() - runtimeMetrics.MemStats.NumGC = NewGauge() - runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) - runtimeMetrics.MemStats.PauseTotalNs = NewGauge() - runtimeMetrics.MemStats.StackInuse = NewGauge() - runtimeMetrics.MemStats.StackSys = NewGauge() - runtimeMetrics.MemStats.Sys = NewGauge() - runtimeMetrics.MemStats.TotalAlloc = NewGauge() - runtimeMetrics.NumCgoCall = NewGauge() - runtimeMetrics.NumGoroutine = NewGauge() - runtimeMetrics.ReadMemStats = NewTimer() - - r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) - r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) - r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) - r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) - r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) - r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) - r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) - r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) - r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) - r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) - r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) - r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) - r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) - r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) - r.Register("runtime.MemStats.MCacheInuse", 
runtimeMetrics.MemStats.MCacheInuse) - r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) - r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) - r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) - r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) - r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) - r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) - r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) - r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) - r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) - r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) - r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) - r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) - r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) - r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_cgo.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_cgo.go deleted file mode 100644 index 38976a8c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_cgo.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build cgo - -package metrics - -import "runtime" - -func numCgoCall() int64 { - return runtime.NumCgoCall() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go deleted file mode 100644 index 38220330..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !cgo - -package metrics - -func numCgoCall() int64 { - return 0 -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_test.go deleted file mode 100644 index a0ca8947..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package metrics - -import ( - "runtime" - "testing" - "time" -) - -func BenchmarkRuntimeMemStats(b *testing.B) { - r := NewRegistry() - RegisterRuntimeMemStats(r) - b.ResetTimer() - for i := 0; i < b.N; i++ { - CaptureRuntimeMemStatsOnce(r) - } -} - -func TestRuntimeMemStats(t *testing.T) { - r := NewRegistry() - RegisterRuntimeMemStats(r) - CaptureRuntimeMemStatsOnce(r) - zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests. 
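
The register/capture pair above is meant to be used as a long-lived background sampler. A sketch, under the caveats the comments spell out:

```go
package main

import (
	"time"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r) // must precede any capture, or captures panic

	// Each capture calls runtime.ReadMemStats, which briefly stops the world,
	// so a multi-second interval is the intended usage.
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(12 * time.Second) // stand-in for the program's real work
}
```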
- runtime.GC() - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero { - t.Fatal(count - zero) - } - runtime.GC() - runtime.GC() - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero { - t.Fatal(count - zero) - } - for i := 0; i < 256; i++ { - runtime.GC() - } - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero { - t.Fatal(count - zero) - } - for i := 0; i < 257; i++ { - runtime.GC() - } - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures. - t.Fatal(count - zero) - } -} - -func TestRuntimeMemStatsBlocking(t *testing.T) { - if g := runtime.GOMAXPROCS(0); g < 2 { - t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g) - } - ch := make(chan int) - go testRuntimeMemStatsBlocking(ch) - var memStats runtime.MemStats - t0 := time.Now() - runtime.ReadMemStats(&memStats) - t1 := time.Now() - t.Log("i++ during runtime.ReadMemStats:", <-ch) - go testRuntimeMemStatsBlocking(ch) - d := t1.Sub(t0) - t.Log(d) - time.Sleep(d) - t.Log("i++ during time.Sleep:", <-ch) -} - -func testRuntimeMemStatsBlocking(ch chan int) { - i := 0 - for { - select { - case ch <- i: - return - default: - i++ - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample.go deleted file mode 100644 index e34b7b58..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample.go +++ /dev/null @@ -1,568 +0,0 @@ -package metrics - -import ( - "container/heap" - "math" - "math/rand" - "sort" - "sync" - "time" -) - -const rescaleThreshold = time.Hour - -// Samples maintain a statistically-significant selection of values from -// a stream. -type Sample interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Size() int - Snapshot() Sample - StdDev() float64 - Sum() int64 - Update(int64) - Values() []int64 - Variance() float64 -} - -// ExpDecaySample is an exponentially-decaying sample using a forward-decaying -// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time -// Decay Model for Streaming Systems". -// -// -type ExpDecaySample struct { - alpha float64 - count int64 - mutex sync.Mutex - reservoirSize int - t0, t1 time.Time - values expDecaySampleHeap -} - -// NewExpDecaySample constructs a new exponentially-decaying sample with the -// given reservoir size and alpha. -func NewExpDecaySample(reservoirSize int, alpha float64) Sample { - if UseNilMetrics { - return NilSample{} - } - s := &ExpDecaySample{ - alpha: alpha, - reservoirSize: reservoirSize, - t0: time.Now(), - values: make(expDecaySampleHeap, 0, reservoirSize), - } - s.t1 = time.Now().Add(rescaleThreshold) - return s -} - -// Clear clears all samples. -func (s *ExpDecaySample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.t0 = time.Now() - s.t1 = s.t0.Add(rescaleThreshold) - s.values = make(expDecaySampleHeap, 0, s.reservoirSize) -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. 
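
The reservoir is keyed by a forward-decay priority (see `update` further below). A sketch of that key via a hypothetical helper, `decayPriority`, which reproduces the exact expression used for the heap key:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// decayPriority reproduces the heap key computed in ExpDecaySample.update:
// exp(alpha * seconds-since-landmark) scaled by 1/u for uniform u. Larger
// keys survive; the min-heap evicts the smallest key when the reservoir
// is full, so newer values win on average.
func decayPriority(alpha float64, t, t0 time.Time) float64 {
	return math.Exp(t.Sub(t0).Seconds()*alpha) / rand.Float64()
}

func main() {
	t0 := time.Now()
	fmt.Println(decayPriority(0.015, t0.Add(10*time.Minute), t0)) // newer -> larger on average
	fmt.Println(decayPriority(0.015, t0.Add(1*time.Minute), t0))
}
```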
-func (s *ExpDecaySample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *ExpDecaySample) Max() int64 { - return SampleMax(s.Values()) -} - -// Mean returns the mean of the values in the sample. -func (s *ExpDecaySample) Mean() float64 { - return SampleMean(s.Values()) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *ExpDecaySample) Min() int64 { - return SampleMin(s.Values()) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *ExpDecaySample) Percentile(p float64) float64 { - return SamplePercentile(s.Values(), p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.Values(), ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *ExpDecaySample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - -// Snapshot returns a read-only copy of the sample. -func (s *ExpDecaySample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - for i, v := range s.values { - values[i] = v.v - } - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *ExpDecaySample) StdDev() float64 { - return SampleStdDev(s.Values()) -} - -// Sum returns the sum of the values in the sample. -func (s *ExpDecaySample) Sum() int64 { - return SampleSum(s.Values()) -} - -// Update samples a new value. -func (s *ExpDecaySample) Update(v int64) { - s.update(time.Now(), v) -} - -// Values returns a copy of the values in the sample. -func (s *ExpDecaySample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - for i, v := range s.values { - values[i] = v.v - } - return values -} - -// Variance returns the variance of the values in the sample. -func (s *ExpDecaySample) Variance() float64 { - return SampleVariance(s.Values()) -} - -// update samples a new value at a particular timestamp. This is a method all -// its own to facilitate testing. -func (s *ExpDecaySample) update(t time.Time, v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) == s.reservoirSize { - heap.Pop(&s.values) - } - heap.Push(&s.values, expDecaySample{ - k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), - v: v, - }) - if t.After(s.t1) { - values := s.values - t0 := s.t0 - s.values = make(expDecaySampleHeap, 0, s.reservoirSize) - s.t0 = t - s.t1 = s.t0.Add(rescaleThreshold) - for _, v := range values { - v.k = v.k * math.Exp(-s.alpha*float64(s.t0.Sub(t0))) - heap.Push(&s.values, v) - } - } -} - -// NilSample is a no-op Sample. -type NilSample struct{} - -// Clear is a no-op. -func (NilSample) Clear() {} - -// Count is a no-op. -func (NilSample) Count() int64 { return 0 } - -// Max is a no-op. -func (NilSample) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilSample) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilSample) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilSample) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. 
-func (NilSample) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Size is a no-op. -func (NilSample) Size() int { return 0 } - -// Sample is a no-op. -func (NilSample) Snapshot() Sample { return NilSample{} } - -// StdDev is a no-op. -func (NilSample) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilSample) Sum() int64 { return 0 } - -// Update is a no-op. -func (NilSample) Update(v int64) {} - -// Values is a no-op. -func (NilSample) Values() []int64 { return []int64{} } - -// Variance is a no-op. -func (NilSample) Variance() float64 { return 0.0 } - -// SampleMax returns the maximum value of the slice of int64. -func SampleMax(values []int64) int64 { - if 0 == len(values) { - return 0 - } - var max int64 = math.MinInt64 - for _, v := range values { - if max < v { - max = v - } - } - return max -} - -// SampleMean returns the mean value of the slice of int64. -func SampleMean(values []int64) float64 { - if 0 == len(values) { - return 0.0 - } - return float64(SampleSum(values)) / float64(len(values)) -} - -// SampleMin returns the minimum value of the slice of int64. -func SampleMin(values []int64) int64 { - if 0 == len(values) { - return 0 - } - var min int64 = math.MaxInt64 - for _, v := range values { - if min > v { - min = v - } - } - return min -} - -// SamplePercentiles returns an arbitrary percentile of the slice of int64. -func SamplePercentile(values int64Slice, p float64) float64 { - return SamplePercentiles(values, []float64{p})[0] -} - -// SamplePercentiles returns a slice of arbitrary percentiles of the slice of -// int64. -func SamplePercentiles(values int64Slice, ps []float64) []float64 { - scores := make([]float64, len(ps)) - size := len(values) - if size > 0 { - sort.Sort(values) - for i, p := range ps { - pos := p * float64(size+1) - if pos < 1.0 { - scores[i] = float64(values[0]) - } else if pos >= float64(size) { - scores[i] = float64(values[size-1]) - } else { - lower := float64(values[int(pos)-1]) - upper := float64(values[int(pos)]) - scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) - } - } - } - return scores -} - -// SampleSnapshot is a read-only copy of another Sample. -type SampleSnapshot struct { - count int64 - values []int64 -} - -// Clear panics. -func (*SampleSnapshot) Clear() { - panic("Clear called on a SampleSnapshot") -} - -// Count returns the count of inputs at the time the snapshot was taken. -func (s *SampleSnapshot) Count() int64 { return s.count } - -// Max returns the maximal value at the time the snapshot was taken. -func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } - -// Mean returns the mean value at the time the snapshot was taken. -func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } - -// Min returns the minimal value at the time the snapshot was taken. -func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } - -// Percentile returns an arbitrary percentile of values at the time the -// snapshot was taken. -func (s *SampleSnapshot) Percentile(p float64) float64 { - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values at the time -// the snapshot was taken. -func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample at the time the snapshot was taken. -func (s *SampleSnapshot) Size() int { return len(s.values) } - -// Snapshot returns the snapshot. 
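
`SamplePercentiles` above interpolates linearly at position p*(n+1). A worked sketch using the exported helper (a `[]int64` converts implicitly to the package's `int64Slice` parameter type):

```go
package main

import (
	"fmt"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	vals := []int64{10, 20, 30, 40}
	// pos = 0.5*(4+1) = 2.5 -> between the 2nd and 3rd order statistics:
	// 20 + 0.5*(30-20) = 25
	fmt.Println(metrics.SamplePercentile(vals, 0.5)) // 25
	// pos = 0.99*5 = 4.95 >= n -> clamped to the maximum: 40
	fmt.Println(metrics.SamplePercentile(vals, 0.99)) // 40
}
```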
-func (s *SampleSnapshot) Snapshot() Sample { return s } - -// StdDev returns the standard deviation of values at the time the snapshot was -// taken. -func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } - -// Sum returns the sum of values at the time the snapshot was taken. -func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } - -// Update panics. -func (*SampleSnapshot) Update(int64) { - panic("Update called on a SampleSnapshot") -} - -// Values returns a copy of the values in the sample. -func (s *SampleSnapshot) Values() []int64 { - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of values at the time the snapshot was taken. -func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } - -// SampleStdDev returns the standard deviation of the slice of int64. -func SampleStdDev(values []int64) float64 { - return math.Sqrt(SampleVariance(values)) -} - -// SampleSum returns the sum of the slice of int64. -func SampleSum(values []int64) int64 { - var sum int64 - for _, v := range values { - sum += v - } - return sum -} - -// SampleVariance returns the variance of the slice of int64. -func SampleVariance(values []int64) float64 { - if 0 == len(values) { - return 0.0 - } - m := SampleMean(values) - var sum float64 - for _, v := range values { - d := float64(v) - m - sum += d * d - } - return sum / float64(len(values)) -} - -// A uniform sample using Vitter's Algorithm R. -// -// -type UniformSample struct { - count int64 - mutex sync.Mutex - reservoirSize int - values []int64 -} - -// NewUniformSample constructs a new uniform sample with the given reservoir -// size. -func NewUniformSample(reservoirSize int) Sample { - if UseNilMetrics { - return NilSample{} - } - return &UniformSample{ - reservoirSize: reservoirSize, - values: make([]int64, 0, reservoirSize), - } -} - -// Clear clears all samples. -func (s *UniformSample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.values = make([]int64, 0, s.reservoirSize) -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *UniformSample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *UniformSample) Max() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMax(s.values) -} - -// Mean returns the mean of the values in the sample. -func (s *UniformSample) Mean() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMean(s.values) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *UniformSample) Min() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMin(s.values) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *UniformSample) Percentile(p float64) float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *UniformSample) Percentiles(ps []float64) []float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. 
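
`UniformSample.Update` (just below) replaces a uniformly random slot once the reservoir is full. For reference, a sketch of textbook Algorithm R (Vitter), which the type's comment cites and which keeps the incoming value only with probability k/n; this is shown for comparison and is not the vendored code:

```go
package main

import (
	"fmt"
	"math/rand"
)

// reservoirUpdate is textbook Algorithm R: given that `seen` values have been
// observed so far *including* v, keep v with probability k/seen.
func reservoirUpdate(reservoir []int64, k int, seen int64, v int64) []int64 {
	if len(reservoir) < k {
		return append(reservoir, v)
	}
	if j := rand.Int63n(seen); j < int64(k) { // true with probability k/seen
		reservoir[j] = v
	}
	return reservoir
}

func main() {
	var r []int64
	for i := int64(1); i <= 1000; i++ {
		r = reservoirUpdate(r, 10, i, i)
	}
	fmt.Println(r) // 10 values drawn uniformly from 1..1000
}
```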
-func (s *UniformSample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - -// Snapshot returns a read-only copy of the sample. -func (s *UniformSample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *UniformSample) StdDev() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleStdDev(s.values) -} - -// Sum returns the sum of the values in the sample. -func (s *UniformSample) Sum() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleSum(s.values) -} - -// Update samples a new value. -func (s *UniformSample) Update(v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) < s.reservoirSize { - s.values = append(s.values, v) - } else { - s.values[rand.Intn(s.reservoirSize)] = v - } -} - -// Values returns a copy of the values in the sample. -func (s *UniformSample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of the values in the sample. -func (s *UniformSample) Variance() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleVariance(s.values) -} - -// expDecaySample represents an individual sample in a heap. -type expDecaySample struct { - k float64 - v int64 -} - -// expDecaySampleHeap is a min-heap of expDecaySamples. -type expDecaySampleHeap []expDecaySample - -func (q expDecaySampleHeap) Len() int { - return len(q) -} - -func (q expDecaySampleHeap) Less(i, j int) bool { - return q[i].k < q[j].k -} - -func (q *expDecaySampleHeap) Pop() interface{} { - q_ := *q - n := len(q_) - i := q_[n-1] - q_ = q_[0 : n-1] - *q = q_ - return i -} - -func (q *expDecaySampleHeap) Push(x interface{}) { - q_ := *q - n := len(q_) - q_ = q_[0 : n+1] - q_[n] = x.(expDecaySample) - *q = q_ -} - -func (q expDecaySampleHeap) Swap(i, j int) { - q[i], q[j] = q[j], q[i] -} - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample_test.go deleted file mode 100644 index 3cff3c09..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample_test.go +++ /dev/null @@ -1,352 +0,0 @@ -package metrics - -import ( - "math/rand" - "runtime" - "testing" - "time" -) - -// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively -// expensive computations like Variance, the cost of copying the Sample, as -// approximated by a make and copy, is much greater than the cost of the -// computation for small samples and only slightly less for large samples. 
-func BenchmarkCompute1000(b *testing.B) { - s := make([]int64, 1000) - for i := 0; i < len(s); i++ { - s[i] = int64(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - SampleVariance(s) - } -} -func BenchmarkCompute1000000(b *testing.B) { - s := make([]int64, 1000000) - for i := 0; i < len(s); i++ { - s[i] = int64(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - SampleVariance(s) - } -} -func BenchmarkCopy1000(b *testing.B) { - s := make([]int64, 1000) - for i := 0; i < len(s); i++ { - s[i] = int64(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - sCopy := make([]int64, len(s)) - copy(sCopy, s) - } -} -func BenchmarkCopy1000000(b *testing.B) { - s := make([]int64, 1000000) - for i := 0; i < len(s); i++ { - s[i] = int64(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - sCopy := make([]int64, len(s)) - copy(sCopy, s) - } -} - -func BenchmarkExpDecaySample257(b *testing.B) { - benchmarkSample(b, NewExpDecaySample(257, 0.015)) -} - -func BenchmarkExpDecaySample514(b *testing.B) { - benchmarkSample(b, NewExpDecaySample(514, 0.015)) -} - -func BenchmarkExpDecaySample1028(b *testing.B) { - benchmarkSample(b, NewExpDecaySample(1028, 0.015)) -} - -func BenchmarkUniformSample257(b *testing.B) { - benchmarkSample(b, NewUniformSample(257)) -} - -func BenchmarkUniformSample514(b *testing.B) { - benchmarkSample(b, NewUniformSample(514)) -} - -func BenchmarkUniformSample1028(b *testing.B) { - benchmarkSample(b, NewUniformSample(1028)) -} - -func TestExpDecaySample10(t *testing.T) { - rand.Seed(1) - s := NewExpDecaySample(100, 0.99) - for i := 0; i < 10; i++ { - s.Update(int64(i)) - } - if size := s.Count(); 10 != size { - t.Errorf("s.Count(): 10 != %v\n", size) - } - if size := s.Size(); 10 != size { - t.Errorf("s.Size(): 10 != %v\n", size) - } - if l := len(s.Values()); 10 != l { - t.Errorf("len(s.Values()): 10 != %v\n", l) - } - for _, v := range s.Values() { - if v > 10 || v < 0 { - t.Errorf("out of range [0, 10): %v\n", v) - } - } -} - -func TestExpDecaySample100(t *testing.T) { - rand.Seed(1) - s := NewExpDecaySample(1000, 0.01) - for i := 0; i < 100; i++ { - s.Update(int64(i)) - } - if size := s.Count(); 100 != size { - t.Errorf("s.Count(): 100 != %v\n", size) - } - if size := s.Size(); 100 != size { - t.Errorf("s.Size(): 100 != %v\n", size) - } - if l := len(s.Values()); 100 != l { - t.Errorf("len(s.Values()): 100 != %v\n", l) - } - for _, v := range s.Values() { - if v > 100 || v < 0 { - t.Errorf("out of range [0, 100): %v\n", v) - } - } -} - -func TestExpDecaySample1000(t *testing.T) { - rand.Seed(1) - s := NewExpDecaySample(100, 0.99) - for i := 0; i < 1000; i++ { - s.Update(int64(i)) - } - if size := s.Count(); 1000 != size { - t.Errorf("s.Count(): 1000 != %v\n", size) - } - if size := s.Size(); 100 != size { - t.Errorf("s.Size(): 100 != %v\n", size) - } - if l := len(s.Values()); 100 != l { - t.Errorf("len(s.Values()): 100 != %v\n", l) - } - for _, v := range s.Values() { - if v > 1000 || v < 0 { - t.Errorf("out of range [0, 1000): %v\n", v) - } - } -} - -// This test makes sure that the sample's priority is not amplified by using -// nanosecond duration since start rather than second duration since start. -// The priority becomes +Inf quickly after starting if this is done, -// effectively freezing the set of samples until a rescale step happens. 
-func TestExpDecaySampleNanosecondRegression(t *testing.T) { - rand.Seed(1) - s := NewExpDecaySample(100, 0.99) - for i := 0; i < 100; i++ { - s.Update(10) - } - time.Sleep(1 * time.Millisecond) - for i := 0; i < 100; i++ { - s.Update(20) - } - v := s.Values() - avg := float64(0) - for i := 0; i < len(v); i++ { - avg += float64(v[i]) - } - avg /= float64(len(v)) - if avg > 16 || avg < 14 { - t.Errorf("out of range [14, 16]: %v\n", avg) - } -} - -func TestExpDecaySampleSnapshot(t *testing.T) { - now := time.Now() - rand.Seed(1) - s := NewExpDecaySample(100, 0.99) - for i := 1; i <= 10000; i++ { - s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) - } - snapshot := s.Snapshot() - s.Update(1) - testExpDecaySampleStatistics(t, snapshot) -} - -func TestExpDecaySampleStatistics(t *testing.T) { - now := time.Now() - rand.Seed(1) - s := NewExpDecaySample(100, 0.99) - for i := 1; i <= 10000; i++ { - s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) - } - testExpDecaySampleStatistics(t, s) -} - -func TestUniformSample(t *testing.T) { - rand.Seed(1) - s := NewUniformSample(100) - for i := 0; i < 1000; i++ { - s.Update(int64(i)) - } - if size := s.Count(); 1000 != size { - t.Errorf("s.Count(): 1000 != %v\n", size) - } - if size := s.Size(); 100 != size { - t.Errorf("s.Size(): 100 != %v\n", size) - } - if l := len(s.Values()); 100 != l { - t.Errorf("len(s.Values()): 100 != %v\n", l) - } - for _, v := range s.Values() { - if v > 1000 || v < 0 { - t.Errorf("out of range [0, 100): %v\n", v) - } - } -} - -func TestUniformSampleIncludesTail(t *testing.T) { - rand.Seed(1) - s := NewUniformSample(100) - max := 100 - for i := 0; i < max; i++ { - s.Update(int64(i)) - } - v := s.Values() - sum := 0 - exp := (max - 1) * max / 2 - for i := 0; i < len(v); i++ { - sum += int(v[i]) - } - if exp != sum { - t.Errorf("sum: %v != %v\n", exp, sum) - } -} - -func TestUniformSampleSnapshot(t *testing.T) { - s := NewUniformSample(100) - for i := 1; i <= 10000; i++ { - s.Update(int64(i)) - } - snapshot := s.Snapshot() - s.Update(1) - testUniformSampleStatistics(t, snapshot) -} - -func TestUniformSampleStatistics(t *testing.T) { - rand.Seed(1) - s := NewUniformSample(100) - for i := 1; i <= 10000; i++ { - s.Update(int64(i)) - } - testUniformSampleStatistics(t, s) -} - -func benchmarkSample(b *testing.B, s Sample) { - var memStats runtime.MemStats - runtime.ReadMemStats(&memStats) - pauseTotalNs := memStats.PauseTotalNs - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Update(1) - } - b.StopTimer() - runtime.GC() - runtime.ReadMemStats(&memStats) - b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N) -} - -func testExpDecaySampleStatistics(t *testing.T, s Sample) { - if count := s.Count(); 10000 != count { - t.Errorf("s.Count(): 10000 != %v\n", count) - } - if min := s.Min(); 107 != min { - t.Errorf("s.Min(): 107 != %v\n", min) - } - if max := s.Max(); 10000 != max { - t.Errorf("s.Max(): 10000 != %v\n", max) - } - if mean := s.Mean(); 4965.98 != mean { - t.Errorf("s.Mean(): 4965.98 != %v\n", mean) - } - if stdDev := s.StdDev(); 2959.825156930727 != stdDev { - t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev) - } - ps := s.Percentiles([]float64{0.5, 0.75, 0.99}) - if 4615 != ps[0] { - t.Errorf("median: 4615 != %v\n", ps[0]) - } - if 7672 != ps[1] { - t.Errorf("75th percentile: 7672 != %v\n", ps[1]) - } - if 9998.99 != ps[2] { - t.Errorf("99th percentile: 9998.99 != %v\n", ps[2]) - } -} - -func testUniformSampleStatistics(t *testing.T, s Sample) { - if count := s.Count(); 
10000 != count { - t.Errorf("s.Count(): 10000 != %v\n", count) - } - if min := s.Min(); 9412 != min { - t.Errorf("s.Min(): 9412 != %v\n", min) - } - if max := s.Max(); 10000 != max { - t.Errorf("s.Max(): 10000 != %v\n", max) - } - if mean := s.Mean(); 9902.26 != mean { - t.Errorf("s.Mean(): 9902.26 != %v\n", mean) - } - if stdDev := s.StdDev(); 101.8667384380201 != stdDev { - t.Errorf("s.StdDev(): 101.8667384380201 != %v\n", stdDev) - } - ps := s.Percentiles([]float64{0.5, 0.75, 0.99}) - if 9930.5 != ps[0] { - t.Errorf("median: 9930.5 != %v\n", ps[0]) - } - if 9973.75 != ps[1] { - t.Errorf("75th percentile: 9973.75 != %v\n", ps[1]) - } - if 9999.99 != ps[2] { - t.Errorf("99th percentile: 9999.99 != %v\n", ps[2]) - } -} - -// TestUniformSampleConcurrentUpdateCount would expose data race problems with -// concurrent Update and Count calls on Sample when test is called with -race -// argument -func TestUniformSampleConcurrentUpdateCount(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - s := NewUniformSample(100) - for i := 0; i < 100; i++ { - s.Update(int64(i)) - } - quit := make(chan struct{}) - go func() { - t := time.NewTicker(10 * time.Millisecond) - for { - select { - case <-t.C: - s.Update(rand.Int63()) - case <-quit: - t.Stop() - return - } - } - }() - for i := 0; i < 1000; i++ { - s.Count() - time.Sleep(5 * time.Millisecond) - } - quit <- struct{}{} -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/stathat/stathat.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/stathat/stathat.go deleted file mode 100644 index 0afcb484..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/stathat/stathat.go +++ /dev/null @@ -1,69 +0,0 @@ -// Metrics output to StatHat. 
-package stathat - -import ( - "github.com/rcrowley/go-metrics" - "github.com/stathat/go" - "log" - "time" -) - -func Stathat(r metrics.Registry, d time.Duration, userkey string) { - for { - if err := sh(r, userkey); nil != err { - log.Println(err) - } - time.Sleep(d) - } -} - -func sh(r metrics.Registry, userkey string) error { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case metrics.Counter: - stathat.PostEZCount(name, userkey, int(metric.Count())) - case metrics.Gauge: - stathat.PostEZValue(name, userkey, float64(metric.Value())) - case metrics.GaugeFloat64: - stathat.PostEZValue(name, userkey, float64(metric.Value())) - case metrics.Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - stathat.PostEZCount(name+".count", userkey, int(h.Count())) - stathat.PostEZValue(name+".min", userkey, float64(h.Min())) - stathat.PostEZValue(name+".max", userkey, float64(h.Max())) - stathat.PostEZValue(name+".mean", userkey, float64(h.Mean())) - stathat.PostEZValue(name+".std-dev", userkey, float64(h.StdDev())) - stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0])) - stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1])) - stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2])) - stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3])) - stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4])) - case metrics.Meter: - m := metric.Snapshot() - stathat.PostEZCount(name+".count", userkey, int(m.Count())) - stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1())) - stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5())) - stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15())) - stathat.PostEZValue(name+".mean", userkey, float64(m.RateMean())) - case metrics.Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - stathat.PostEZCount(name+".count", userkey, int(t.Count())) - stathat.PostEZValue(name+".min", userkey, float64(t.Min())) - stathat.PostEZValue(name+".max", userkey, float64(t.Max())) - stathat.PostEZValue(name+".mean", userkey, float64(t.Mean())) - stathat.PostEZValue(name+".std-dev", userkey, float64(t.StdDev())) - stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0])) - stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1])) - stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2])) - stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3])) - stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4])) - stathat.PostEZValue(name+".one-minute", userkey, float64(t.Rate1())) - stathat.PostEZValue(name+".five-minute", userkey, float64(t.Rate5())) - stathat.PostEZValue(name+".fifteen-minute", userkey, float64(t.Rate15())) - stathat.PostEZValue(name+".mean-rate", userkey, float64(t.RateMean())) - } - }) - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/syslog.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/syslog.go deleted file mode 100644 index 693f1908..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/syslog.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build !windows - -package metrics - -import ( - "fmt" - "log/syslog" - "time" -) - -// Output each metric in the given registry to syslog periodically using -// the given syslogger. 
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) { - for _ = range time.Tick(d) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) - case Gauge: - w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) - case GaugeFloat64: - w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) - case Healthcheck: - metric.Check() - w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", - name, - h.Count(), - h.Min(), - h.Max(), - h.Mean(), - h.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - )) - case Meter: - m := metric.Snapshot() - w.Info(fmt.Sprintf( - "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", - name, - m.Count(), - m.Rate1(), - m.Rate5(), - m.Rate15(), - m.RateMean(), - )) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", - name, - t.Count(), - t.Min(), - t.Max(), - t.Mean(), - t.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - t.Rate1(), - t.Rate5(), - t.Rate15(), - t.RateMean(), - )) - } - }) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer.go deleted file mode 100644 index 73f19b58..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer.go +++ /dev/null @@ -1,299 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Timers capture the duration and rate of events. -type Timer interface { - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Timer - StdDev() float64 - Time(func()) - Update(time.Duration) - UpdateSince(time.Time) - Variance() float64 -} - -// GetOrRegisterTimer returns an existing Timer or constructs and registers a -// new StandardTimer. -func GetOrRegisterTimer(name string, r Registry) Timer { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewTimer).(Timer) -} - -// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. -func NewCustomTimer(h Histogram, m Meter) Timer { - if UseNilMetrics { - return NilTimer{} - } - return &StandardTimer{ - histogram: h, - meter: m, - } -} - -// NewRegisteredTimer constructs and registers a new StandardTimer. -func NewRegisteredTimer(name string, r Registry) Timer { - c := NewTimer() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewTimer constructs a new StandardTimer using an exponentially-decaying -// sample with the same reservoir size and alpha as UNIX load averages. 
-func NewTimer() Timer { - if UseNilMetrics { - return NilTimer{} - } - return &StandardTimer{ - histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), - meter: NewMeter(), - } -} - -// NilTimer is a no-op Timer. -type NilTimer struct { - h Histogram - m Meter -} - -// Count is a no-op. -func (NilTimer) Count() int64 { return 0 } - -// Max is a no-op. -func (NilTimer) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilTimer) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilTimer) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilTimer) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilTimer) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Rate1 is a no-op. -func (NilTimer) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilTimer) Rate5() float64 { return 0.0 } - -// Rate15 is a no-op. -func (NilTimer) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilTimer) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilTimer) Snapshot() Timer { return NilTimer{} } - -// StdDev is a no-op. -func (NilTimer) StdDev() float64 { return 0.0 } - -// Time is a no-op. -func (NilTimer) Time(func()) {} - -// Update is a no-op. -func (NilTimer) Update(time.Duration) {} - -// UpdateSince is a no-op. -func (NilTimer) UpdateSince(time.Time) {} - -// Variance is a no-op. -func (NilTimer) Variance() float64 { return 0.0 } - -// StandardTimer is the standard implementation of a Timer and uses a Histogram -// and Meter. -type StandardTimer struct { - histogram Histogram - meter Meter - mutex sync.Mutex -} - -// Count returns the number of events recorded. -func (t *StandardTimer) Count() int64 { - return t.histogram.Count() -} - -// Max returns the maximum value in the sample. -func (t *StandardTimer) Max() int64 { - return t.histogram.Max() -} - -// Mean returns the mean of the values in the sample. -func (t *StandardTimer) Mean() float64 { - return t.histogram.Mean() -} - -// Min returns the minimum value in the sample. -func (t *StandardTimer) Min() int64 { - return t.histogram.Min() -} - -// Percentile returns an arbitrary percentile of the values in the sample. -func (t *StandardTimer) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (t *StandardTimer) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (t *StandardTimer) Rate1() float64 { - return t.meter.Rate1() -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (t *StandardTimer) Rate5() float64 { - return t.meter.Rate5() -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (t *StandardTimer) Rate15() float64 { - return t.meter.Rate15() -} - -// RateMean returns the meter's mean rate of events per second. -func (t *StandardTimer) RateMean() float64 { - return t.meter.RateMean() -} - -// Snapshot returns a read-only copy of the timer. -func (t *StandardTimer) Snapshot() Timer { - t.mutex.Lock() - defer t.mutex.Unlock() - return &TimerSnapshot{ - histogram: t.histogram.Snapshot().(*HistogramSnapshot), - meter: t.meter.Snapshot().(*MeterSnapshot), - } -} - -// StdDev returns the standard deviation of the values in the sample. 
-func (t *StandardTimer) StdDev() float64 {
-	return t.histogram.StdDev()
-}
-
-// Time records the duration of the execution of the given function.
-func (t *StandardTimer) Time(f func()) {
-	ts := time.Now()
-	f()
-	t.Update(time.Since(ts))
-}
-
-// Update records the duration of an event.
-func (t *StandardTimer) Update(d time.Duration) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	t.histogram.Update(int64(d))
-	t.meter.Mark(1)
-}
-
-// UpdateSince records the duration of an event that started at the given time
-// and ends now.
-func (t *StandardTimer) UpdateSince(ts time.Time) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	t.histogram.Update(int64(time.Since(ts)))
-	t.meter.Mark(1)
-}
-
-// Variance returns the variance of the values in the sample.
-func (t *StandardTimer) Variance() float64 {
-	return t.histogram.Variance()
-}
-
-// TimerSnapshot is a read-only copy of another Timer.
-type TimerSnapshot struct {
-	histogram *HistogramSnapshot
-	meter     *MeterSnapshot
-}
-
-// Count returns the number of events recorded at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
-
-// Max returns the maximum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
-
-// Min returns the minimum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
-
-// Percentile returns an arbitrary percentile of sampled values at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) Percentile(p float64) float64 {
-	return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of sampled values at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
-	return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
-
-// Snapshot returns the snapshot.
-func (t *TimerSnapshot) Snapshot() Timer { return t }
-
-// StdDev returns the standard deviation of the values at the time the snapshot
-// was taken.
-func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
-	panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
-	panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
-	panic("UpdateSince called on a TimerSnapshot")
-}
-
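A StandardTimer, as defined above, composes a Histogram (latency distribution) with a Meter (event rate). A short usage sketch, assuming the upstream import path github.com/rcrowley/go-metrics that this vendored copy tracks; doWork is a stand-in workload:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func doWork() { time.Sleep(5 * time.Millisecond) } // stand-in workload

func main() {
	t := metrics.NewTimer()

	// Three equivalent ways to record a duration:
	t.Time(doWork)                  // wrap a call; measured via time.Since
	t.Update(42 * time.Millisecond) // record a known duration
	start := time.Now()
	doWork()
	t.UpdateSince(start) // same as t.Update(time.Since(start))

	// Read from a consistent snapshot rather than the live timer.
	snap := t.Snapshot()
	fmt.Printf("count=%d mean=%.2fms p95=%.2fms\n",
		snap.Count(),
		snap.Mean()/float64(time.Millisecond),
		snap.Percentile(0.95)/float64(time.Millisecond))
}
```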
-// Variance returns the variance of the values at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer_test.go
deleted file mode 100644
index 2fa415d4..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package metrics
-
-import (
-	"math"
-	"testing"
-	"time"
-)
-
-func BenchmarkTimer(b *testing.B) {
-	tm := NewTimer()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		tm.Update(1)
-	}
-}
-
-func TestGetOrRegisterTimer(t *testing.T) {
-	r := NewRegistry()
-	NewRegisteredTimer("foo", r).Update(47)
-	if tm := GetOrRegisterTimer("foo", r); 1 != tm.Count() {
-		t.Fatal(tm)
-	}
-}
-
-func TestTimerExtremes(t *testing.T) {
-	tm := NewTimer()
-	tm.Update(math.MaxInt64)
-	tm.Update(0)
-	if stdDev := tm.StdDev(); 4.611686018427388e+18 != stdDev {
-		t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
-	}
-}
-
-func TestTimerFunc(t *testing.T) {
-	tm := NewTimer()
-	tm.Time(func() { time.Sleep(50e6) })
-	if max := tm.Max(); 45e6 > max || max > 55e6 {
-		t.Errorf("tm.Max(): 45e6 > %v || %v > 55e6\n", max, max)
-	}
-}
-
-func TestTimerZero(t *testing.T) {
-	tm := NewTimer()
-	if count := tm.Count(); 0 != count {
-		t.Errorf("tm.Count(): 0 != %v\n", count)
-	}
-	if min := tm.Min(); 0 != min {
-		t.Errorf("tm.Min(): 0 != %v\n", min)
-	}
-	if max := tm.Max(); 0 != max {
-		t.Errorf("tm.Max(): 0 != %v\n", max)
-	}
-	if mean := tm.Mean(); 0.0 != mean {
-		t.Errorf("tm.Mean(): 0.0 != %v\n", mean)
-	}
-	if stdDev := tm.StdDev(); 0.0 != stdDev {
-		t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev)
-	}
-	ps := tm.Percentiles([]float64{0.5, 0.75, 0.99})
-	if 0.0 != ps[0] {
-		t.Errorf("median: 0.0 != %v\n", ps[0])
-	}
-	if 0.0 != ps[1] {
-		t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
-	}
-	if 0.0 != ps[2] {
-		t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
-	}
-	if rate1 := tm.Rate1(); 0.0 != rate1 {
-		t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1)
-	}
-	if rate5 := tm.Rate5(); 0.0 != rate5 {
-		t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5)
-	}
-	if rate15 := tm.Rate15(); 0.0 != rate15 {
-		t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15)
-	}
-	if rateMean := tm.RateMean(); 0.0 != rateMean {
-		t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean)
-	}
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer.go
deleted file mode 100644
index 091e971d..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"io"
-	"sort"
-	"time"
-)
-
-// Write sorts and writes each metric in the given registry periodically to
-// the given io.Writer.
-func Write(r Registry, d time.Duration, w io.Writer) {
-	for _ = range time.Tick(d) {
-		WriteOnce(r, w)
-	}
-}
-
-// WriteOnce sorts and writes metrics in the given registry to the given
-// io.Writer.
-func WriteOnce(r Registry, w io.Writer) { - var namedMetrics namedMetricSlice - r.Each(func(name string, i interface{}) { - namedMetrics = append(namedMetrics, namedMetric{name, i}) - }) - - sort.Sort(namedMetrics) - for _, namedMetric := range namedMetrics { - switch metric := namedMetric.m.(type) { - case Counter: - fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", metric.Count()) - case Gauge: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %9d\n", metric.Value()) - case GaugeFloat64: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) - fmt.Fprintf(w, " error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "histogram %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", h.Count()) - fmt.Fprintf(w, " min: %9d\n", h.Min()) - fmt.Fprintf(w, " max: %9d\n", h.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "meter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", m.Count()) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "timer %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", t.Count()) - fmt.Fprintf(w, " min: %9d\n", t.Min()) - fmt.Fprintf(w, " max: %9d\n", t.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) - } - } -} - -type namedMetric struct { - name string - m interface{} -} - -// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. 
-type namedMetricSlice []namedMetric - -func (nms namedMetricSlice) Len() int { return len(nms) } - -func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } - -func (nms namedMetricSlice) Less(i, j int) bool { - return nms[i].name < nms[j].name -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer_test.go deleted file mode 100644 index 1aacc287..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package metrics - -import ( - "sort" - "testing" -) - -func TestMetricsSorting(t *testing.T) { - var namedMetrics = namedMetricSlice{ - {name: "zzz"}, - {name: "bbb"}, - {name: "fff"}, - {name: "ggg"}, - } - - sort.Sort(namedMetrics) - for i, name := range []string{"bbb", "fff", "ggg", "zzz"} { - if namedMetrics[i].name != name { - t.Fail() - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.gitignore deleted file mode 100644 index ca502e29..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.nut -*.swp -examples/example1 -examples/example_web diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/LICENSE deleted file mode 100644 index 01a9a5c4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/README.md
deleted file mode 100644
index 61068a82..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/README.md
+++ /dev/null
@@ -1,119 +0,0 @@
-# GoRelic
-
-New Relic agent for the Go runtime. It collects a lot of metrics about the scheduler, garbage collector and memory allocator and
-sends them to NewRelic.
-
-### Requirements
-- Go 1.1 or higher
-- github.com/yvasiyarov/gorelic
-- github.com/yvasiyarov/newrelic_platform_go
-- github.com/yvasiyarov/go-metrics
-
-Only the first two dependencies have to be installed manually. All other dependencies will be installed automatically
-by the Go toolchain.
-
-### Installation
-```bash
-go get github.com/yvasiyarov/gorelic
-```
-and add the following code to the initialization part of your application:
-```go
-import (
-    "github.com/yvasiyarov/gorelic"
-)
-....
-
-agent := gorelic.NewAgent()
-agent.Verbose = true
-agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE"
-agent.Run()
-
-```
-
-### Middleware
-If you are using the Beego, Martini, Revel or Gin framework, you can hook up gorelic with your application by using the following middleware:
-- https://github.com/yvasiyarov/beego_gorelic
-- https://github.com/yvasiyarov/martini_gorelic
-- https://github.com/yvasiyarov/gocraft_gorelic
-- http://wiki.colar.net/revel_newelic
-- https://github.com/jingweno/negroni-gorelic
-- https://github.com/brandfolder/gin-gorelic
-
-
-### Configuration
-- NewrelicLicense - the only mandatory setting of this agent.
-- NewrelicName - component name in the NewRelic dashboard. Default value: "Go daemon"
-- NewrelicPollInterval - how often metrics will be sent to NewRelic. Default value: 60 seconds
-- Verbose - print some information useful for debugging. Default value: false
-- CollectGcStat - whether the agent should collect garbage collector statistics. Default value: true
-- CollectHTTPStat - whether the agent should collect HTTP metrics. Default value: false
-- CollectMemoryStat - whether the agent should collect memory allocator statistics. Default value: true
-- GCPollInterval - how often GC statistics should be collected. Default value: 10 seconds. It has a performance impact. For more information, please see the metrics documentation.
-- MemoryAllocatorPollInterval - how often memory allocator statistics should be collected. Default value: 60 seconds. It has a performance impact. For more information, please read the metrics documentation.
-
-
-## Metrics reported by plugin
-This agent uses functions exposed by the runtime and runtime/debug packages to collect the most important information about the Go runtime.
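The sections that follow describe what the agent derives from those two packages. For orientation, this is roughly the raw substrate it polls (plain standard-library calls, independent of gorelic; the agent layers go-metrics and the NewRelic metric names on top):

```go
package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	// General metrics.
	fmt.Println("goroutines:", runtime.NumGoroutine())
	fmt.Println("cgo calls:", runtime.NumCgoCall())

	// Garbage collector metrics.
	var gc debug.GCStats
	debug.ReadGCStats(&gc) // NumGC, PauseTotal, recent pause durations
	fmt.Println("GC runs:", gc.NumGC, "total pause:", gc.PauseTotal)

	// Memory allocator metrics. ReadMemStats stops the world, which is
	// why MemoryAllocatorPollInterval deserves care (see below).
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Println("heap in use:", m.HeapInuse, "total from OS:", m.Sys)
}
```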
-
-### General metrics
-- Runtime/General/NOGoroutines - number of running goroutines, as reported by NumGoroutine() from the runtime package
-- Runtime/General/NOCgoCalls - number of cgo calls made, as reported by NumCgoCall() from the runtime package
-
-### Garbage collector metrics
-- Runtime/GC/NumberOfGCCalls - number of GC calls, as reported by ReadGCStats() from runtime/debug
-- Runtime/GC/PauseTotalTime - total pause time during GC calls, as reported by ReadGCStats() from runtime/debug (in nanoseconds)
-- Runtime/GC/GCTime/Max - max GC time
-- Runtime/GC/GCTime/Min - min GC time
-- Runtime/GC/GCTime/Mean - GC mean time
-- Runtime/GC/GCTime/Percentile95 - 95th percentile of GC time
-
-All these metrics are measured in nanoseconds. The last 4 of them can be inaccurate if GC is called more often than once per GCPollInterval.
-If GC is called more often in your workload, consider decreasing the value of GCPollInterval.
-But be careful: ReadGCStats() locks the mheap, so it is not a good idea to set GCPollInterval to very low values.
-
-### Memory allocator
-- Component/Runtime/Memory/SysMem/Total - number of bytes/minute allocated from the OS in total.
-- Component/Runtime/Memory/SysMem/Stack - number of bytes/minute allocated from the OS for stacks.
-- Component/Runtime/Memory/SysMem/MSpan - number of bytes/minute allocated from the OS for internal MSpan structs.
-- Component/Runtime/Memory/SysMem/MCache - number of bytes/minute allocated from the OS for internal MCache structs.
-- Component/Runtime/Memory/SysMem/Heap - number of bytes/minute allocated from the OS for the heap.
-- Component/Runtime/Memory/SysMem/BuckHash - number of bytes/minute allocated from the OS for internal BuckHash structs.
-- Component/Runtime/Memory/Operations/NoFrees - number of memory frees per minute
-- Component/Runtime/Memory/Operations/NoMallocs - number of memory allocations per minute
-- Component/Runtime/Memory/Operations/NoPointerLookups - number of pointer lookups per minute
-- Component/Runtime/Memory/InUse/Total - total amount of memory in use
-- Component/Runtime/Memory/InUse/Heap - amount of memory in use for the heap
-- Component/Runtime/Memory/InUse/MCacheInuse - amount of memory in use for MCache internal structures
-- Component/Runtime/Memory/InUse/MSpanInuse - amount of memory in use for MSpan internal structures
-- Component/Runtime/Memory/InUse/Stack - amount of memory in use for stacks
-
-### Process metrics
-- Component/Runtime/System/Threads - number of OS threads used
-- Runtime/System/FDSize - number of file descriptors used by the process
-- Runtime/System/Memory/VmPeakSize - VM max size
-- Runtime/System/Memory/VmCurrent - VM current size
-- Runtime/System/Memory/RssPeak - max size of the resident memory set
-- Runtime/System/Memory/RssCurrent - current size of the resident memory set
-
-All these metrics are collected once per MemoryAllocatorPollInterval. To collect them, the agent uses the ReadMemStats() routine.
-This routine calls stoptheworld() internally and blocks everything, so please consider this when you change the MemoryAllocatorPollInterval value.
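Both warnings above are the same trade-off: fresher GC and allocator numbers cost global locking or pauses. A sketch of tuning the two intervals (field names as documented in the Configuration section; the values are illustrative, not recommendations):

```go
agent := gorelic.NewAgent()
agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE"
agent.GCPollInterval = 10               // seconds; ReadGCStats() locks the mheap
agent.MemoryAllocatorPollInterval = 120 // seconds; ReadMemStats() stops the world
agent.Run()
```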
-
-### HTTP metrics
-- throughput (requests per second), calculated for the last minute
-- mean throughput (requests per second)
-- mean response time
-- min response time
-- max response time
-- 75%, 90%, 95% percentiles for response time
-
-
-In order to collect HTTP metrics, handler functions must be wrapped using WrapHTTPHandlerFunc:
-
-```go
-http.HandleFunc("/", agent.WrapHTTPHandlerFunc(handler))
-```
-
-## TODO
-- Collect per-size allocation statistics
-- Collect user-defined metrics
-
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/agent.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/agent.go
deleted file mode 100644
index 660623d6..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/agent.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package gorelic
-
-import (
-	"errors"
-	"fmt"
-	metrics "github.com/yvasiyarov/go-metrics"
-	"github.com/yvasiyarov/newrelic_platform_go"
-	"log"
-	"net/http"
-)
-
-const (
-	// DefaultNewRelicPollInterval - how often we will report metrics to NewRelic.
-	// The recommended value is 60 seconds
-	DefaultNewRelicPollInterval = 60
-
-	// DefaultGcPollIntervalInSeconds - how often we will collect garbage collector run statistics
-	// Default value is every 10 seconds
-	// During GC stat polling the mheap is locked, so be careful when changing this value
-	DefaultGcPollIntervalInSeconds = 10
-
-	// DefaultMemoryAllocatorPollIntervalInSeconds - how often we will collect memory allocator statistics.
-	// Default value is every 60 seconds
-	// During this process stoptheworld() is called, so be careful when changing this value
-	DefaultMemoryAllocatorPollIntervalInSeconds = 60
-
-	//DefaultAgentGuid is the plugin ID in NewRelic.
-	//You should not change it unless you want to create your own plugin.
-	DefaultAgentGuid = "com.github.yvasiyarov.GoRelic"
-
-	//CurrentAgentVersion is the plugin version
-	CurrentAgentVersion = "0.0.6"
-
-	//DefaultAgentName is the agent name shown in the NewRelic GUI. You can change it.
-	DefaultAgentName = "Go daemon"
-)
-
-//Agent is the NewRelic agent implementation.
-//Agent starts a separate goroutine which reports data to NewRelic
-type Agent struct {
-	NewrelicName                string
-	NewrelicLicense             string
-	NewrelicPollInterval        int
-	Verbose                     bool
-	CollectGcStat               bool
-	CollectMemoryStat           bool
-	CollectHTTPStat             bool
-	GCPollInterval              int
-	MemoryAllocatorPollInterval int
-	AgentGUID                   string
-	AgentVersion                string
-	plugin                      *newrelic_platform_go.NewrelicPlugin
-	HTTPTimer                   metrics.Timer
-}
-
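A sketch of how this Agent is typically wired into an HTTP server; every field and method used here is defined in this file, while the handler body and listen address are illustrative:

```go
package main

import (
	"io"
	"log"
	"net/http"

	"github.com/yvasiyarov/gorelic"
)

func main() {
	agent := gorelic.NewAgent()
	agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE"
	agent.CollectHTTPStat = true // WrapHTTPHandlerFunc also sets this via initTimer
	if err := agent.Run(); err != nil {
		log.Fatal(err)
	}

	// Wrapping the handler feeds each request's duration into agent.HTTPTimer.
	http.HandleFunc("/", agent.WrapHTTPHandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```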
-//NewAgent builds a new Agent object.
-func NewAgent() *Agent {
-	agent := &Agent{
-		NewrelicName:                DefaultAgentName,
-		NewrelicPollInterval:        DefaultNewRelicPollInterval,
-		Verbose:                     false,
-		CollectGcStat:               true,
-		CollectMemoryStat:           true,
-		GCPollInterval:              DefaultGcPollIntervalInSeconds,
-		MemoryAllocatorPollInterval: DefaultMemoryAllocatorPollIntervalInSeconds,
-		AgentGUID:                   DefaultAgentGuid,
-		AgentVersion:                CurrentAgentVersion,
-	}
-	return agent
-}
-
-//WrapHTTPHandlerFunc instruments HTTP handler functions to collect HTTP metrics
-func (agent *Agent) WrapHTTPHandlerFunc(h tHTTPHandlerFunc) tHTTPHandlerFunc {
-	agent.initTimer()
-	return func(w http.ResponseWriter, req *http.Request) {
-		proxy := newHTTPHandlerFunc(h)
-		proxy.timer = agent.HTTPTimer
-		proxy.ServeHTTP(w, req)
-	}
-}
-
-//WrapHTTPHandler instruments an HTTP handler object to collect HTTP metrics
-func (agent *Agent) WrapHTTPHandler(h http.Handler) http.Handler {
-	agent.initTimer()
-
-	proxy := newHTTPHandler(h)
-	proxy.timer = agent.HTTPTimer
-	return proxy
-}
-
-//Run initializes the Agent instance and starts the harvest goroutine
-func (agent *Agent) Run() error {
-	if agent.NewrelicLicense == "" {
-		return errors.New("please, pass a valid newrelic license key")
-	}
-
-	agent.plugin = newrelic_platform_go.NewNewrelicPlugin(agent.AgentVersion, agent.NewrelicLicense, agent.NewrelicPollInterval)
-	component := newrelic_platform_go.NewPluginComponent(agent.NewrelicName, agent.AgentGUID)
-	agent.plugin.AddComponent(component)
-
-	addRuntimeMericsToComponent(component)
-
-	if agent.CollectGcStat {
-		addGCMericsToComponent(component, agent.GCPollInterval)
-		agent.debug(fmt.Sprintf("Init GC metrics collection. Poll interval %d seconds.", agent.GCPollInterval))
-	}
-	if agent.CollectMemoryStat {
-		addMemoryMericsToComponent(component, agent.MemoryAllocatorPollInterval)
-		agent.debug(fmt.Sprintf("Init memory allocator metrics collection. Poll interval %d seconds.", agent.MemoryAllocatorPollInterval))
-	}
-
-	if agent.CollectHTTPStat {
-		agent.initTimer()
-		addHTTPMericsToComponent(component, agent.HTTPTimer)
-		agent.debug(fmt.Sprintf("Init HTTP metrics collection."))
-	}
-
-	agent.plugin.Verbose = agent.Verbose
-	go agent.plugin.Run()
-	return nil
-}
-
-//initTimer initializes the global metrics.Timer object, used to collect HTTP metrics
-func (agent *Agent) initTimer() {
-	if agent.HTTPTimer == nil {
-		agent.HTTPTimer = metrics.NewTimer()
-	}
-
-	agent.CollectHTTPStat = true
-}
-
-//debug prints debug messages when Verbose is enabled
-func (agent *Agent) debug(msg string) {
-	if agent.Verbose {
-		log.Println(msg)
-	}
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/doc.go
deleted file mode 100644
index 69de9fee..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package gorelic is a New Relic agent implementation for the Go runtime. It collects a lot of metrics about the Go scheduler, garbage collector and memory allocator and sends them to NewRelic.
-package gorelic diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example1.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example1.go deleted file mode 100644 index dc6c0e34..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example1.go +++ /dev/null @@ -1,52 +0,0 @@ -package main - -import ( - "flag" - "github.com/yvasiyarov/gorelic" - "log" - "math/rand" - "runtime" - "time" -) - -var newrelicLicense = flag.String("newrelic-license", "", "Newrelic license") - -func allocateAndSum(arraySize int) int { - arr := make([]int, arraySize, arraySize) - for i := range arr { - arr[i] = rand.Int() - } - time.Sleep(time.Duration(rand.Intn(3000)) * time.Millisecond) - - result := 0 - for _, v := range arr { - result += v - } - //log.Printf("Array size is: %d, sum is: %d\n", arraySize, result) - return result -} - -func doSomeJob(numRoutines int) { - for { - for i := 0; i < numRoutines; i++ { - go allocateAndSum(rand.Intn(1024) * 1024) - } - log.Printf("All %d routines started\n", numRoutines) - time.Sleep(1000 * time.Millisecond) - runtime.GC() - } -} - -func main() { - - flag.Parse() - if *newrelicLicense == "" { - log.Fatalf("Please, pass a valid newrelic license key.\n Use --help to get more information about available options\n") - } - agent := gorelic.NewAgent() - agent.Verbose = true - agent.NewrelicLicense = *newrelicLicense - agent.Run() - - doSomeJob(100) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example_web.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example_web.go deleted file mode 100644 index aae0ef7e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example_web.go +++ /dev/null @@ -1,63 +0,0 @@ -package main - -import ( - "expvar" - "flag" - "github.com/yvasiyarov/gorelic" - "io" - "log" - "math/rand" - "net/http" - "runtime" - "time" -) - -var newrelicLicense = flag.String("newrelic-license", "", "Newrelic license") - -var numCalls = expvar.NewInt("num_calls") - -func allocateAndSum(arraySize int) int { - arr := make([]int, arraySize, arraySize) - for i := range arr { - arr[i] = rand.Int() - } - time.Sleep(time.Duration(rand.Intn(3000)) * time.Millisecond) - - result := 0 - for _, v := range arr { - result += v - } - //log.Printf("Array size is: %d, sum is: %d\n", arraySize, result) - return result -} - -func doSomeJob(numRoutines int) { - for i := 0; i < numRoutines; i++ { - go allocateAndSum(rand.Intn(1024) * 1024) - } - log.Printf("All %d routines started\n", numRoutines) - time.Sleep(1000 * time.Millisecond) - runtime.GC() -} - -func helloServer(w http.ResponseWriter, req *http.Request) { - - doSomeJob(5) - io.WriteString(w, "Did some work") -} - -func main() { - flag.Parse() - if *newrelicLicense == "" { - log.Fatalf("Please, pass a valid newrelic license key.\n Use --help to get more information about available options\n") - } - agent := gorelic.NewAgent() - agent.Verbose = true - agent.CollectHTTPStat = true - agent.NewrelicLicense = *newrelicLicense - agent.Run() - - http.HandleFunc("/", agent.WrapHTTPHandlerFunc(helloServer)) - http.ListenAndServe(":8080", nil) - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gc_metrics.go 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gc_metrics.go
deleted file mode 100644
index 39405940..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gc_metrics.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package gorelic
-
-import (
-	metrics "github.com/yvasiyarov/go-metrics"
-	"github.com/yvasiyarov/newrelic_platform_go"
-	"time"
-)
-
-func newGCMetricaDataSource(pollInterval int) goMetricaDataSource {
-	r := metrics.NewRegistry()
-
-	metrics.RegisterDebugGCStats(r)
-	go metrics.CaptureDebugGCStats(r, time.Duration(pollInterval)*time.Second)
-	return goMetricaDataSource{r}
-}
-
-func addGCMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) {
-	metrics := []*baseGoMetrica{
-		&baseGoMetrica{
-			name:          "NumberOfGCCalls",
-			units:         "calls",
-			dataSourceKey: "debug.GCStats.NumGC",
-		},
-		&baseGoMetrica{
-			name:          "PauseTotalTime",
-			units:         "nanoseconds",
-			dataSourceKey: "debug.GCStats.PauseTotal",
-		},
-	}
-
-	ds := newGCMetricaDataSource(pollInterval)
-	for _, m := range metrics {
-		m.basePath = "Runtime/GC/"
-		m.dataSource = ds
-		component.AddMetrica(&gaugeMetrica{m})
-	}
-
-	histogramMetrics := []*histogramMetrica{
-		&histogramMetrica{
-			statFunction:  histogramMax,
-			baseGoMetrica: &baseGoMetrica{name: "Max"},
-		},
-		&histogramMetrica{
-			statFunction:  histogramMin,
-			baseGoMetrica: &baseGoMetrica{name: "Min"},
-		},
-		&histogramMetrica{
-			statFunction:  histogramMean,
-			baseGoMetrica: &baseGoMetrica{name: "Mean"},
-		},
-		&histogramMetrica{
-			statFunction:    histogramPercentile,
-			percentileValue: 0.95,
-			baseGoMetrica:   &baseGoMetrica{name: "Percentile95"},
-		},
-	}
-	for _, m := range histogramMetrics {
-		m.baseGoMetrica.units = "nanoseconds"
-		m.baseGoMetrica.dataSourceKey = "debug.GCStats.Pause"
-		m.baseGoMetrica.basePath = "Runtime/GC/GCTime/"
-		m.baseGoMetrica.dataSource = ds
-
-		component.AddMetrica(m)
-	}
-}
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gometrica.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gometrica.go
deleted file mode 100644
index 52fcdd57..00000000
--- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gometrica.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package gorelic
-
-import (
-	"fmt"
-	metrics "github.com/yvasiyarov/go-metrics"
-)
-
-const (
-	histogramMin = iota
-	histogramMax
-	histogramMean
-	histogramPercentile
-	histogramStdDev
-	histogramVariance
-	noHistogramFunctions
-)
-
-type goMetricaDataSource struct {
-	metrics.Registry
-}
-
-func (ds goMetricaDataSource) GetGaugeValue(key string) (float64, error) {
-	if valueContainer := ds.Get(key); valueContainer == nil {
-		return 0, fmt.Errorf("metrica with name %s is not registered\n", key)
-	} else if gauge, ok := valueContainer.(metrics.Gauge); ok {
-		return float64(gauge.Value()), nil
-	} else {
-		return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer)
-	}
-}
-
-func (ds goMetricaDataSource) GetHistogramValue(key string, statFunction int, percentile float64) (float64, error) {
-	if valueContainer := ds.Get(key); valueContainer == nil {
-		return 0, fmt.Errorf("metrica with name %s is not registered\n", key)
-	} else if histogram, ok := valueContainer.(metrics.Histogram); ok {
-		switch statFunction {
-		default:
-			return 0, fmt.Errorf("unsupported stat function for histogram: %d\n", statFunction)
-		case histogramMax:
-			return
float64(histogram.Max()), nil - case histogramMin: - return float64(histogram.Min()), nil - case histogramMean: - return float64(histogram.Mean()), nil - case histogramStdDev: - return float64(histogram.StdDev()), nil - case histogramVariance: - return float64(histogram.Variance()), nil - case histogramPercentile: - return float64(histogram.Percentile(percentile)), nil - } - } else { - return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer) - } -} - -type baseGoMetrica struct { - dataSource goMetricaDataSource - basePath string - name string - units string - dataSourceKey string -} - -func (metrica *baseGoMetrica) GetName() string { - return metrica.basePath + metrica.name -} - -func (metrica *baseGoMetrica) GetUnits() string { - return metrica.units -} - -type gaugeMetrica struct { - *baseGoMetrica -} - -func (metrica *gaugeMetrica) GetValue() (float64, error) { - return metrica.dataSource.GetGaugeValue(metrica.dataSourceKey) -} - -type gaugeIncMetrica struct { - *baseGoMetrica - previousValue float64 -} - -func (metrica *gaugeIncMetrica) GetValue() (float64, error) { - var value float64 - var currentValue float64 - var err error - if currentValue, err = metrica.dataSource.GetGaugeValue(metrica.dataSourceKey); err == nil { - value = currentValue - metrica.previousValue - metrica.previousValue = currentValue - } - return value, err -} - -type histogramMetrica struct { - *baseGoMetrica - statFunction int - percentileValue float64 -} - -func (metrica *histogramMetrica) GetValue() (float64, error) { - return metrica.dataSource.GetHistogramValue(metrica.dataSourceKey, metrica.statFunction, metrica.percentileValue) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/http_metrics.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/http_metrics.go deleted file mode 100644 index e54cbd37..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/http_metrics.go +++ /dev/null @@ -1,194 +0,0 @@ -package gorelic - -import ( - metrics "github.com/yvasiyarov/go-metrics" - "github.com/yvasiyarov/newrelic_platform_go" - "net/http" - "time" -) - -type tHTTPHandlerFunc func(http.ResponseWriter, *http.Request) -type tHTTPHandler struct { - originalHandler http.Handler - originalHandlerFunc tHTTPHandlerFunc - isFunc bool - timer metrics.Timer -} - -var httpTimer metrics.Timer - -func newHTTPHandlerFunc(h tHTTPHandlerFunc) *tHTTPHandler { - return &tHTTPHandler{ - isFunc: true, - originalHandlerFunc: h, - } -} -func newHTTPHandler(h http.Handler) *tHTTPHandler { - return &tHTTPHandler{ - isFunc: false, - originalHandler: h, - } -} - -func (handler *tHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - startTime := time.Now() - defer handler.timer.UpdateSince(startTime) - - if handler.isFunc { - handler.originalHandlerFunc(w, req) - } else { - handler.originalHandler.ServeHTTP(w, req) - } -} - -type baseTimerMetrica struct { - dataSource metrics.Timer - name string - units string -} - -func (metrica *baseTimerMetrica) GetName() string { - return metrica.name -} - -func (metrica *baseTimerMetrica) GetUnits() string { - return metrica.units -} - -type timerRate1Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerRate1Metrica) GetValue() (float64, error) { - return metrica.dataSource.Rate1(), nil -} - -type timerRateMeanMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerRateMeanMetrica) GetValue() 
(float64, error) { - return metrica.dataSource.RateMean(), nil -} - -type timerMeanMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerMeanMetrica) GetValue() (float64, error) { - return metrica.dataSource.Mean() / float64(time.Millisecond), nil -} - -type timerMinMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerMinMetrica) GetValue() (float64, error) { - return float64(metrica.dataSource.Min()) / float64(time.Millisecond), nil -} - -type timerMaxMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerMaxMetrica) GetValue() (float64, error) { - return float64(metrica.dataSource.Max()) / float64(time.Millisecond), nil -} - -type timerPercentile75Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerPercentile75Metrica) GetValue() (float64, error) { - return metrica.dataSource.Percentile(0.75) / float64(time.Millisecond), nil -} - -type timerPercentile90Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerPercentile90Metrica) GetValue() (float64, error) { - return metrica.dataSource.Percentile(0.90) / float64(time.Millisecond), nil -} - -type timerPercentile95Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerPercentile95Metrica) GetValue() (float64, error) { - return metrica.dataSource.Percentile(0.95) / float64(time.Millisecond), nil -} - -func addHTTPMericsToComponent(component newrelic_platform_go.IComponent, timer metrics.Timer) { - rate1 := &timerRate1Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/throughput/1minute", - units: "rps", - dataSource: timer, - }, - } - component.AddMetrica(rate1) - - rateMean := &timerRateMeanMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/throughput/rateMean", - units: "rps", - dataSource: timer, - }, - } - component.AddMetrica(rateMean) - - responseTimeMean := &timerMeanMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/mean", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimeMean) - - responseTimeMax := &timerMaxMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/max", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimeMax) - - responseTimeMin := &timerMinMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/min", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimeMin) - - responseTimePercentile75 := &timerPercentile75Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/percentile75", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimePercentile75) - - responseTimePercentile90 := &timerPercentile90Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/percentile90", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimePercentile90) - - responseTimePercentile95 := &timerPercentile95Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/percentile95", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimePercentile95) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/memory_metrics.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/memory_metrics.go deleted file mode 100644 index 5c8d3e4e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/memory_metrics.go +++ /dev/null @@ -1,110 +0,0 @@ -package gorelic - -import ( - 
metrics "github.com/yvasiyarov/go-metrics" - "github.com/yvasiyarov/newrelic_platform_go" - "time" -) - -func newMemoryMetricaDataSource(pollInterval int) goMetricaDataSource { - r := metrics.NewRegistry() - - metrics.RegisterRuntimeMemStats(r) - metrics.CaptureRuntimeMemStatsOnce(r) - go metrics.CaptureRuntimeMemStats(r, time.Duration(pollInterval)*time.Second) - return goMetricaDataSource{r} -} - -func addMemoryMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) { - gaugeMetrics := []*baseGoMetrica{ - //Memory in use metrics - &baseGoMetrica{ - name: "InUse/Total", - units: "bytes", - dataSourceKey: "runtime.MemStats.Alloc", - }, - &baseGoMetrica{ - name: "InUse/Heap", - units: "bytes", - dataSourceKey: "runtime.MemStats.HeapAlloc", - }, - &baseGoMetrica{ - name: "InUse/Stack", - units: "bytes", - dataSourceKey: "runtime.MemStats.StackInuse", - }, - &baseGoMetrica{ - name: "InUse/MSpanInuse", - units: "bytes", - dataSourceKey: "runtime.MemStats.MSpanInuse", - }, - &baseGoMetrica{ - name: "InUse/MCacheInuse", - units: "bytes", - dataSourceKey: "runtime.MemStats.MCacheInuse", - }, - } - ds := newMemoryMetricaDataSource(pollInterval) - for _, m := range gaugeMetrics { - m.basePath = "Runtime/Memory/" - m.dataSource = ds - component.AddMetrica(&gaugeMetrica{m}) - } - - gaugeIncMetrics := []*baseGoMetrica{ - //NO operations graph - &baseGoMetrica{ - name: "Operations/NoPointerLookups", - units: "lookups", - dataSourceKey: "runtime.MemStats.Lookups", - }, - &baseGoMetrica{ - name: "Operations/NoMallocs", - units: "mallocs", - dataSourceKey: "runtime.MemStats.Mallocs", - }, - &baseGoMetrica{ - name: "Operations/NoFrees", - units: "frees", - dataSourceKey: "runtime.MemStats.Frees", - }, - - // Sytem memory allocations - &baseGoMetrica{ - name: "SysMem/Total", - units: "bytes", - dataSourceKey: "runtime.MemStats.Sys", - }, - &baseGoMetrica{ - name: "SysMem/Heap", - units: "bytes", - dataSourceKey: "runtime.MemStats.HeapSys", - }, - &baseGoMetrica{ - name: "SysMem/Stack", - units: "bytes", - dataSourceKey: "runtime.MemStats.StackSys", - }, - &baseGoMetrica{ - name: "SysMem/MSpan", - units: "bytes", - dataSourceKey: "runtime.MemStats.MSpanSys", - }, - &baseGoMetrica{ - name: "SysMem/MCache", - units: "bytes", - dataSourceKey: "runtime.MemStats.MCacheSys", - }, - &baseGoMetrica{ - name: "SysMem/BuckHash", - units: "bytes", - dataSourceKey: "runtime.MemStats.BuckHashSys", - }, - } - - for _, m := range gaugeIncMetrics { - m.basePath = "Runtime/Memory/" - m.dataSource = ds - component.AddMetrica(&gaugeIncMetrica{baseGoMetrica: m}) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/nut.json b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/nut.json deleted file mode 100644 index 7abb8ec6..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/nut.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Version": "0.0.6", - "Vendor": "yvasiyarov", - "Authors": [ - { - "FullName": "Yuriy Vasiyarov", - "Email": "varyous@gmail.com" - } - ], - "ExtraFiles": [ - "README.md", - "LICENSE" - ], - "Homepage": "https://github.com/yvasiyarov/gorelic" -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/runtime_metrics.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/runtime_metrics.go deleted file mode 100644 index 87a42ca6..00000000 --- 
a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/runtime_metrics.go +++ /dev/null @@ -1,196 +0,0 @@ -package gorelic - -import ( - "fmt" - "github.com/yvasiyarov/newrelic_platform_go" - "io/ioutil" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -const linuxSystemQueryInterval = 60 - -// Number of goroutines metrica -type noGoroutinesMetrica struct{} - -func (metrica *noGoroutinesMetrica) GetName() string { - return "Runtime/General/NOGoroutines" -} -func (metrica *noGoroutinesMetrica) GetUnits() string { - return "goroutines" -} -func (metrica *noGoroutinesMetrica) GetValue() (float64, error) { - return float64(runtime.NumGoroutine()), nil -} - -// Number of CGO calls metrica -type noCgoCallsMetrica struct { - lastValue int64 -} - -func (metrica *noCgoCallsMetrica) GetName() string { - return "Runtime/General/NOCgoCalls" -} -func (metrica *noCgoCallsMetrica) GetUnits() string { - return "calls" -} -func (metrica *noCgoCallsMetrica) GetValue() (float64, error) { - currentValue := runtime.NumCgoCall() - value := float64(currentValue - metrica.lastValue) - metrica.lastValue = currentValue - - return value, nil -} - -//OS specific metrics data source interface -type iSystemMetricaDataSource interface { - GetValue(key string) (float64, error) -} - -// iSystemMetricaDataSource fabrica -func newSystemMetricaDataSource() iSystemMetricaDataSource { - var ds iSystemMetricaDataSource - switch runtime.GOOS { - default: - ds = &systemMetricaDataSource{} - case "linux": - ds = &linuxSystemMetricaDataSource{ - systemData: make(map[string]string), - } - } - return ds -} - -//Default implementation of iSystemMetricaDataSource. Just return an error -type systemMetricaDataSource struct{} - -func (ds *systemMetricaDataSource) GetValue(key string) (float64, error) { - return 0, fmt.Errorf("this metrica was not implemented yet for %s", runtime.GOOS) -} - -// Linux OS implementation of ISystemMetricaDataSource -type linuxSystemMetricaDataSource struct { - lastUpdate time.Time - systemData map[string]string -} - -func (ds *linuxSystemMetricaDataSource) GetValue(key string) (float64, error) { - if err := ds.checkAndUpdateData(); err != nil { - return 0, err - } else if val, ok := ds.systemData[key]; !ok { - return 0, fmt.Errorf("system data with key %s was not found", key) - } else if key == "VmSize" || key == "VmPeak" || key == "VmHWM" || key == "VmRSS" { - valueParts := strings.Split(val, " ") - if len(valueParts) != 2 { - return 0, fmt.Errorf("invalid format for value %s", key) - } - valConverted, err := strconv.ParseFloat(valueParts[0], 64) - if err != nil { - return 0, err - } - switch valueParts[1] { - case "kB": - valConverted *= 1 << 10 - case "mB": - valConverted *= 1 << 20 - case "gB": - valConverted *= 1 << 30 - } - return valConverted, nil - } else if valConverted, err := strconv.ParseFloat(val, 64); err != nil { - return valConverted, nil - } else { - return valConverted, nil - } -} -func (ds *linuxSystemMetricaDataSource) checkAndUpdateData() error { - startTime := time.Now() - if startTime.Sub(ds.lastUpdate) > time.Second*linuxSystemQueryInterval { - path := fmt.Sprintf("/proc/%d/status", os.Getpid()) - rawStats, err := ioutil.ReadFile(path) - if err != nil { - return err - } - - lines := strings.Split(string(rawStats), "\n") - for _, line := range lines { - parts := strings.Split(line, ":") - if len(parts) == 2 { - k := strings.TrimSpace(parts[0]) - v := strings.TrimSpace(parts[1]) - - ds.systemData[k] = v - } - } - ds.lastUpdate = startTime - } 
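- // For reference, /proc/<pid>/status is a plain-text table of "Key: value"
- // lines (for example "Threads:	12" or "VmRSS:	  14256 kB"), which is why
- // the loop above can cache every pair as a raw string and leave the kB/mB/gB
- // unit conversion to GetValue.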
- return nil -} - -// OS specific metrica -type systemMetrica struct { - sourceKey string - newrelicName string - units string - dataSource iSystemMetricaDataSource -} - -func (metrica *systemMetrica) GetName() string { - return metrica.newrelicName -} -func (metrica *systemMetrica) GetUnits() string { - return metrica.units -} -func (metrica *systemMetrica) GetValue() (float64, error) { - return metrica.dataSource.GetValue(metrica.sourceKey) -} - -func addRuntimeMericsToComponent(component newrelic_platform_go.IComponent) { - component.AddMetrica(&noGoroutinesMetrica{}) - component.AddMetrica(&noCgoCallsMetrica{}) - - ds := newSystemMetricaDataSource() - metrics := []*systemMetrica{ - &systemMetrica{ - sourceKey: "Threads", - units: "Threads", - newrelicName: "Runtime/System/Threads", - }, - &systemMetrica{ - sourceKey: "FDSize", - units: "fd", - newrelicName: "Runtime/System/FDSize", - }, - // Peak virtual memory size - &systemMetrica{ - sourceKey: "VmPeak", - units: "bytes", - newrelicName: "Runtime/System/Memory/VmPeakSize", - }, - //Virtual memory size - &systemMetrica{ - sourceKey: "VmSize", - units: "bytes", - newrelicName: "Runtime/System/Memory/VmCurrent", - }, - //Peak resident set size - &systemMetrica{ - sourceKey: "VmHWM", - units: "bytes", - newrelicName: "Runtime/System/Memory/RssPeak", - }, - //Resident set size - &systemMetrica{ - sourceKey: "VmRSS", - units: "bytes", - newrelicName: "Runtime/System/Memory/RssCurrent", - }, - } - for _, m := range metrics { - m.dataSource = ds - component.AddMetrica(m) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/.travis.yml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/LICENSE deleted file mode 100644 index 01a9a5c4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/README.md deleted file mode 100644 index 34462344..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/README.md +++ /dev/null @@ -1,11 +0,0 @@ -New Relic Platform Agent SDK for Go(golang) -==================== - -[![Build Status](https://travis-ci.org/yvasiyarov/newrelic_platform_go.png?branch=master)](https://travis-ci.org/yvasiyarov/newrelic_platform_go) - -This package provide very simple interface to NewRelic Platform http://newrelic.com/platform - -For example of usage see examples/wave_plugin.go - -For real-word example, you can have a look at: -https://github.com/yvasiyarov/newrelic_sphinx diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/agent.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/agent.go deleted file mode 100644 index d9d27535..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/agent.go +++ /dev/null @@ -1,27 +0,0 @@ -package newrelic_platform_go - -import ( - "log" - "os" -) - -type Agent struct { - Host string `json:"host"` - Version string `json:"version"` - Pid int `json:"pid"` -} - -func NewAgent(Version string) *Agent { - agent := &Agent{ - Version: Version, - } - return agent -} - -func (agent *Agent) CollectEnvironmentInfo() { - var err error - agent.Pid = os.Getpid() - if agent.Host, err = os.Hostname(); err != nil { - log.Fatalf("Can not get hostname: %#v \n", err) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/component.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/component.go deleted file mode 100644 index 000f7ab7..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/component.go +++ /dev/null @@ -1,71 +0,0 @@ -package newrelic_platform_go - -import ( - "log" - "math" -) - -type ComponentData interface{} -type IComponent interface { - Harvest(plugin INewrelicPlugin) ComponentData - SetDuration(duration int) - AddMetrica(model IMetrica) - ClearSentData() -} - -type PluginComponent struct { - Name string `json:"name"` - GUID string `json:"guid"` - Duration int `json:"duration"` - Metrics map[string]MetricaValue `json:"metrics"` - MetricaModels []IMetrica `json:"-"` -} - -func NewPluginComponent(name string, guid string) *PluginComponent { - c := &PluginComponent{ - Name: name, - GUID: guid, - } - return c -} - -func (component *PluginComponent) AddMetrica(model IMetrica) { - component.MetricaModels = append(component.MetricaModels, model) -} - -func (component 
*PluginComponent) ClearSentData() { - component.Metrics = nil -} - -func (component *PluginComponent) SetDuration(duration int) { - component.Duration = duration -} - -func (component *PluginComponent) Harvest(plugin INewrelicPlugin) ComponentData { - component.Metrics = make(map[string]MetricaValue, len(component.MetricaModels)) - for i := 0; i < len(component.MetricaModels); i++ { - model := component.MetricaModels[i] - metricaKey := plugin.GetMetricaKey(model) - - if newValue, err := model.GetValue(); err == nil { - if math.IsInf(newValue, 0) || math.IsNaN(newValue) { - newValue = 0 - } - - if existMetric, ok := component.Metrics[metricaKey]; ok { - if floatExistVal, ok := existMetric.(float64); ok { - component.Metrics[metricaKey] = NewAggregatedMetricaValue(floatExistVal, newValue) - } else if aggregatedValue, ok := existMetric.(*AggregatedMetricaValue); ok { - aggregatedValue.Aggregate(newValue) - } else { - panic("Invalid type in metrica value") - } - } else { - component.Metrics[metricaKey] = newValue - } - } else { - log.Printf("Can not get metrica: %v, got error:%#v", model.GetName(), err) - } - } - return component -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/doc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/doc.go deleted file mode 100644 index ef41e969..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package newrelic_platform_go is New Relic Platform Agent SDK for Go language. -package newrelic_platform_go diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/examples/wave_plugin.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/examples/wave_plugin.go deleted file mode 100644 index 57f3cf87..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/examples/wave_plugin.go +++ /dev/null @@ -1,72 +0,0 @@ -package main - -import ( - "github.com/yvasiyarov/newrelic_platform_go" -) - -type WaveMetrica struct { - sawtoothMax int - sawtoothCounter int -} - -func (metrica *WaveMetrica) GetName() string { - return "Wave_Metrica" -} -func (metrica *WaveMetrica) GetUnits() string { - return "Queries/Second" -} -func (metrica *WaveMetrica) GetValue() (float64, error) { - metrica.sawtoothCounter++ - if metrica.sawtoothCounter > metrica.sawtoothMax { - metrica.sawtoothCounter = 0 - } - return float64(metrica.sawtoothCounter), nil -} - -type SquareWaveMetrica struct { - squarewaveMax int - squarewaveCounter int -} - -func (metrica *SquareWaveMetrica) GetName() string { - return "SquareWave_Metrica" -} -func (metrica *SquareWaveMetrica) GetUnits() string { - return "Queries/Second" -} -func (metrica *SquareWaveMetrica) GetValue() (float64, error) { - returnValue := 0 - metrica.squarewaveCounter++ - - if metrica.squarewaveCounter < (metrica.squarewaveMax / 2) { - returnValue = 0 - } else { - returnValue = metrica.squarewaveMax - } - - if metrica.squarewaveCounter > metrica.squarewaveMax { - metrica.squarewaveCounter = 0 - } - return float64(returnValue), nil -} - -func main() { - plugin := newrelic_platform_go.NewNewrelicPlugin("0.0.1", "7bceac019c7dcafae1ef95be3e3a3ff8866de246", 60) - component := newrelic_platform_go.NewPluginComponent("Wave component", 
"com.exmaple.plugin.gowave") - plugin.AddComponent(component) - - m := &WaveMetrica{ - sawtoothMax: 10, - sawtoothCounter: 5, - } - component.AddMetrica(m) - - m1 := &SquareWaveMetrica{ - squarewaveMax: 4, - squarewaveCounter: 1, - } - component.AddMetrica(m1) - - plugin.Verbose = true - plugin.Run() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/metrica.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/metrica.go deleted file mode 100644 index fc4fbd48..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/metrica.go +++ /dev/null @@ -1,42 +0,0 @@ -package newrelic_platform_go - -import ( - "math" -) - -type IMetrica interface { - GetValue() (float64, error) - GetName() string - GetUnits() string -} - -type MetricaValue interface{} - -type SimpleMetricaValue float64 - -type AggregatedMetricaValue struct { - Min float64 `json:"min"` - Max float64 `json:"max"` - Total float64 `json:"total"` - Count int `json:"count"` - SumOfSquares float64 `json:"sum_of_squares"` -} - -func NewAggregatedMetricaValue(existValue float64, newValue float64) *AggregatedMetricaValue { - v := &AggregatedMetricaValue{ - Min: math.Min(newValue, existValue), - Max: math.Max(newValue, existValue), - Total: newValue + existValue, - Count: 2, - SumOfSquares: newValue*newValue + existValue*existValue, - } - return v -} - -func (aggregatedValue *AggregatedMetricaValue) Aggregate(newValue float64) { - aggregatedValue.Min = math.Min(newValue, aggregatedValue.Min) - aggregatedValue.Max = math.Max(newValue, aggregatedValue.Max) - aggregatedValue.Total += newValue - aggregatedValue.Count++ - aggregatedValue.SumOfSquares += newValue * newValue -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/nut.json b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/nut.json deleted file mode 100644 index 1e57c395..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/nut.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Version": "0.0.1", - "Vendor": "yvasiyarov", - "Authors": [ - { - "FullName": "Yuriy Vasiyarov", - "Email": "varyous@gmail.com" - } - ], - "ExtraFiles": [ - "README.md", - "LICENSE" - ], - "Homepage": "https://github.com/yvasiyarov/newrelic_platform_go" -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/plugin.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/plugin.go deleted file mode 100644 index 3e45666d..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/plugin.go +++ /dev/null @@ -1,194 +0,0 @@ -package newrelic_platform_go - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "net/http" - "strings" - "time" -) - -const ( - NEWRELIC_API_URL = "https://platform-api.newrelic.com/platform/v1/metrics" -) - -type INewrelicPlugin interface { - GetMetricaKey(metrica IMetrica) string - Harvest() error - Run() - AddComponent(component IComponent) -} -type NewrelicPlugin struct { - Agent *Agent `json:"agent"` - Components []ComponentData `json:"components"` - - ComponentModels []IComponent `json:"-"` - LastPollTime time.Time `json:"-"` - Verbose bool `json:"-"` - 
LicenseKey string `json:"-"` - PollIntervalInSecond int `json:"-"` -} - -func NewNewrelicPlugin(version string, licenseKey string, pollInterval int) *NewrelicPlugin { - plugin := &NewrelicPlugin{ - LicenseKey: licenseKey, - PollIntervalInSecond: pollInterval, - } - - plugin.Agent = NewAgent(version) - plugin.Agent.CollectEnvironmentInfo() - - plugin.ComponentModels = []IComponent{} - return plugin -} - -func (plugin *NewrelicPlugin) Harvest() error { - startTime := time.Now() - var duration int - if plugin.LastPollTime.IsZero() { - duration = plugin.PollIntervalInSecond - } else { - duration = int(startTime.Sub(plugin.LastPollTime).Seconds()) - } - - plugin.Components = make([]ComponentData, 0, len(plugin.ComponentModels)) - for i := 0; i < len(plugin.ComponentModels); i++ { - plugin.ComponentModels[i].SetDuration(duration) - plugin.Components = append(plugin.Components, plugin.ComponentModels[i].Harvest(plugin)) - } - - if httpCode, err := plugin.SendMetricas(); err != nil { - log.Printf("Can not send metricas to newrelic: %#v\n", err) - return err - } else { - - if plugin.Verbose { - log.Printf("Got HTTP response code:%d", httpCode) - } - - if err, isFatal := plugin.CheckResponse(httpCode); isFatal { - log.Printf("Got fatal error:%v\n", err) - return err - } else { - if err != nil { - log.Printf("WARNING: %v", err) - } - return err - } - } - return nil -} - -func (plugin *NewrelicPlugin) GetMetricaKey(metrica IMetrica) string { - var keyBuffer bytes.Buffer - - keyBuffer.WriteString("Component/") - keyBuffer.WriteString(metrica.GetName()) - keyBuffer.WriteString("[") - keyBuffer.WriteString(metrica.GetUnits()) - keyBuffer.WriteString("]") - - return keyBuffer.String() -} - -func (plugin *NewrelicPlugin) SendMetricas() (int, error) { - client := &http.Client{} - var metricasJson []byte - var encodingError error - - if plugin.Verbose { - metricasJson, encodingError = json.MarshalIndent(plugin, "", " ") - } else { - metricasJson, encodingError = json.Marshal(plugin) - } - - if encodingError != nil { - return 0, encodingError - } - - jsonAsString := string(metricasJson) - if plugin.Verbose { - log.Printf("Send data:%s \n", jsonAsString) - } - - if httpRequest, err := http.NewRequest("POST", NEWRELIC_API_URL, strings.NewReader(jsonAsString)); err != nil { - return 0, err - } else { - httpRequest.Header.Set("X-License-Key", plugin.LicenseKey) - httpRequest.Header.Set("Content-Type", "application/json") - httpRequest.Header.Set("Accept", "application/json") - - if httpResponse, err := client.Do(httpRequest); err != nil { - return 0, err - } else { - defer httpResponse.Body.Close() - return httpResponse.StatusCode, nil - } - } - - // we will never get there - return 0, nil -} - -func (plugin *NewrelicPlugin) ClearSentData() { - for _, component := range plugin.ComponentModels { - component.ClearSentData() - } - plugin.Components = nil - plugin.LastPollTime = time.Now() -} - -func (plugin *NewrelicPlugin) CheckResponse(httpResponseCode int) (error, bool) { - isFatal := false - var err error - switch httpResponseCode { - case http.StatusOK: - { - plugin.ClearSentData() - } - case http.StatusForbidden: - { - err = fmt.Errorf("Authentication error (no license key header, or invalid license key).\n") - isFatal = true - } - case http.StatusBadRequest: - { - err = fmt.Errorf("The request or headers are in the wrong format or the URL is incorrect.\n") - isFatal = true - } - case http.StatusNotFound: - { - err = fmt.Errorf("Invalid URL\n") - isFatal = true - } - case http.StatusRequestEntityTooLarge: - { 
- err = fmt.Errorf("Too many metrics were sent in one request, or too many components (instances) were specified in one request, or other single-request limits were reached.\n") - //discard metrics - plugin.ClearSentData() - } - case http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout: - { - err = fmt.Errorf("Got %v response code.Metricas will be aggregated", httpResponseCode) - } - } - return err, isFatal -} - -func (plugin *NewrelicPlugin) Run() { - plugin.Harvest() - tickerChannel := time.Tick(time.Duration(plugin.PollIntervalInSecond) * time.Second) - for ts := range tickerChannel { - plugin.Harvest() - - if plugin.Verbose { - log.Printf("Harvest ended at:%v\n", ts) - } - } -} - -func (plugin *NewrelicPlugin) AddComponent(component IComponent) { - plugin.ComponentModels = append(plugin.ComponentModels, component) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/base64.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc311609..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index b8e18d74..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "golang.org/x/crypto/blowfish" - "io" - "strconv" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. 
-var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. -type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. -var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. 
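GenerateFromPassword and CompareHashAndPassword above are the entire workflow most callers need from this package. A usage sketch against the upstream golang.org/x/crypto/bcrypt import path (illustrative only):

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Costs below MinCost are silently raised to DefaultCost (10).
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	// nil means match; a wrong password yields ErrMismatchedHashAndPassword.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("s3cret")))
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("wrong")))
}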
-func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). - p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - ckey := append(key, 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n += 1 - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n += 1 - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. 
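As Hash and decodeVersion above show, the stored format is '$' + major + optional minor + '$' + a two-digit cost + '$' + 22 salt characters + 31 digest characters, hence minHashSize = 59. Slicing the expected hash from the tests further below makes the fields visible (illustrative only):

package main

import "fmt"

func main() {
	h := "$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga"
	fmt.Println("version:", h[1:3])  // "2a" (major '2', minor 'a')
	fmt.Println("cost:   ", h[4:6])  // "10", always two digits
	fmt.Println("salt:   ", h[7:29]) // 22 chars, encodedSaltSize
	fmt.Println("digest: ", h[29:])  // 31 chars, encodedHashSize
}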
-func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go deleted file mode 100644 index f08a6f5b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/bcrypt/bcrypt_test.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import ( - "bytes" - "fmt" - "testing" -) - -func TestBcryptingIsEasy(t *testing.T) { - pass := []byte("mypassword") - hp, err := GenerateFromPassword(pass, 0) - if err != nil { - t.Fatalf("GenerateFromPassword error: %s", err) - } - - if CompareHashAndPassword(hp, pass) != nil { - t.Errorf("%v should hash %s correctly", hp, pass) - } - - notPass := "notthepass" - err = CompareHashAndPassword(hp, []byte(notPass)) - if err != ErrMismatchedHashAndPassword { - t.Errorf("%v and %s should be mismatched", hp, notPass) - } -} - -func TestBcryptingIsCorrect(t *testing.T) { - pass := []byte("allmine") - salt := []byte("XajjQvNhvvRt5GSeFk1xFe") - expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga") - - hash, err := bcrypt(pass, 10, salt) - if err != nil { - t.Fatalf("bcrypt blew up: %v", err) - } - if !bytes.HasSuffix(expectedHash, hash) { - t.Errorf("%v should be the suffix of %v", hash, expectedHash) - } - - h, err := newFromHash(expectedHash) - if err != nil { - t.Errorf("Unable to parse %s: %v", string(expectedHash), err) - } - - // This is not the safe way to compare these hashes. We do this only for - // testing clarity. 
Use bcrypt.CompareHashAndPassword() - if err == nil && !bytes.Equal(expectedHash, h.Hash()) { - t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash) - } -} - -func TestVeryShortPasswords(t *testing.T) { - key := []byte("k") - salt := []byte("XajjQvNhvvRt5GSeFk1xFe") - _, err := bcrypt(key, 10, salt) - if err != nil { - t.Errorf("One byte key resulted in error: %s", err) - } -} - -func TestTooLongPasswordsWork(t *testing.T) { - salt := []byte("XajjQvNhvvRt5GSeFk1xFe") - // One byte over the usual 56 byte limit that blowfish has - tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456") - tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C") - hash, err := bcrypt(tooLongPass, 10, salt) - if err != nil { - t.Fatalf("bcrypt blew up on long password: %v", err) - } - if !bytes.HasSuffix(tooLongExpected, hash) { - t.Errorf("%v should be the suffix of %v", hash, tooLongExpected) - } -} - -type InvalidHashTest struct { - err error - hash []byte -} - -var invalidTests = []InvalidHashTest{ - {ErrHashTooShort, []byte("$2a$10$fooo")}, - {ErrHashTooShort, []byte("$2a")}, - {HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, - {InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, - {InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")}, -} - -func TestInvalidHashErrors(t *testing.T) { - check := func(name string, expected, err error) { - if err == nil { - t.Errorf("%s: Should have returned an error", name) - } - if err != nil && err != expected { - t.Errorf("%s gave err %v but should have given %v", name, err, expected) - } - } - for _, iht := range invalidTests { - _, err := newFromHash(iht.hash) - check("newFromHash", iht.err, err) - err = CompareHashAndPassword(iht.hash, []byte("anything")) - check("CompareHashAndPassword", iht.err, err) - } -} - -func TestUnpaddedBase64Encoding(t *testing.T) { - original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30} - encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe") - - encoded := base64Encode(original) - - if !bytes.Equal(encodedOriginal, encoded) { - t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal) - } - - decoded, err := base64Decode(encodedOriginal) - if err != nil { - t.Fatalf("base64Decode blew up: %s", err) - } - - if !bytes.Equal(decoded, original) { - t.Errorf("Decoded %v should have equaled %v", decoded, original) - } -} - -func TestCost(t *testing.T) { - suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C" - for _, vers := range []string{"2a", "2"} { - for _, cost := range []int{4, 10} { - s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix) - h := []byte(s) - actual, err := Cost(h) - if err != nil { - t.Errorf("Cost, error: %s", err) - continue - } - if actual != cost { - t.Errorf("Cost, expected: %d, actual: %d", cost, actual) - } - } - } - _, err := Cost([]byte("$a$a$" + suffix)) - if err == nil { - t.Errorf("Cost, malformed but no error returned") - } -} - -func TestCostValidationInHash(t *testing.T) { - if testing.Short() { - return - } - - pass := []byte("mypassword") - - for c := 0; c < MinCost; c++ { - p, _ := newFromPassword(pass, c) - if p.cost != DefaultCost { - t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost) - } - } - - p, _ := newFromPassword(pass, 14) - if p.cost != 14 { - t.Errorf("newFromPassword 
should default cost to 14, but was %d", p.cost) - } - - hp, _ := newFromHash(p.Hash()) - if p.cost != hp.cost { - t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost) - } - - _, err := newFromPassword(pass, 32) - if err == nil { - t.Fatalf("newFromPassword: should return a cost error") - } - if err != InvalidCostError(32) { - t.Errorf("newFromPassword: should return cost error, got %#v", err) - } -} - -func TestCostReturnsWithLeadingZeroes(t *testing.T) { - hp, _ := newFromPassword([]byte("abcdefgh"), 7) - cost := hp.Hash()[4:7] - expected := []byte("07$") - - if !bytes.Equal(expected, cost) { - t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected) - } -} - -func TestMinorNotRequired(t *testing.T) { - noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga") - h, err := newFromHash(noMinorHash) - if err != nil { - t.Fatalf("No minor hash blew up: %s", err) - } - if h.minor != 0 { - t.Errorf("Should leave minor version at 0, but was %d", h.minor) - } - - if !bytes.Equal(noMinorHash, h.Hash()) { - t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash()) - } -} - -func BenchmarkEqual(b *testing.B) { - b.StopTimer() - passwd := []byte("somepasswordyoulike") - hash, _ := GenerateFromPassword(passwd, 10) - b.StartTimer() - for i := 0; i < b.N; i++ { - CompareHashAndPassword(hash, passwd) - } -} - -func BenchmarkGeneration(b *testing.B) { - b.StopTimer() - passwd := []byte("mylongpassword1234") - b.StartTimer() - for i := 0; i < b.N; i++ { - GenerateFromPassword(passwd, 10) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f195..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. -func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. 
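- // (Key schedule, for reference: this loop XORs key words cyclically into
- // the 18-entry P-array; afterwards the cipher repeatedly encrypts an
- // all-zero block, writing each ciphertext back over P and then over the
- // four 256-entry S-boxes in turn.)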
- var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. -func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func 
decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go deleted file mode 100644 index 7afa1fdf..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/blowfish_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -import "testing" - -type CryptTest struct { - key []byte - in []byte - out []byte -} - -// Test vector values are from http://www.schneier.com/code/vectors.txt. 
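Each key/plaintext/ciphertext triple below is one ECB block and drives both TestCipherEncrypt and TestCipherDecrypt. Running the first vector through the upstream golang.org/x/crypto/blowfish package looks like this (a sketch, not code from the patch):

package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	// First vector: all-zero 8-byte key and plaintext.
	key := make([]byte, 8)
	var src, dst [8]byte
	c, err := blowfish.NewCipher(key)
	if err != nil {
		panic(err)
	}
	c.Encrypt(dst[:], src[:])
	fmt.Printf("%X\n", dst) // 4EF997456198DD78, matching the first entry
}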
-var encryptTests = []CryptTest{ - { - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}}, - { - []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, - []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, - []byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}}, - { - []byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, - []byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}}, - { - []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, - []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, - []byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}}, - - { - []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, - []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, - []byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}}, - { - []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, - []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, - []byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}}, - { - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}}, - { - []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10}, - []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, - []byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}}, - { - []byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57}, - []byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42}, - []byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}}, - { - []byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E}, - []byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA}, - []byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}}, - { - []byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86}, - []byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72}, - []byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}}, - { - []byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E}, - []byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A}, - []byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}}, - { - []byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6}, - []byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2}, - []byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}}, - { - []byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE}, - []byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A}, - []byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}}, - { - []byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6}, - []byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2}, - []byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}}, - { - []byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE}, - []byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A}, - []byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}}, - { - []byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16}, - []byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02}, - []byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}}, - { - []byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F}, - []byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A}, - []byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}}, - { - []byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46}, - []byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32}, - []byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}}, - { - []byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E}, - []byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 
0xCA}, - []byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}}, - { - []byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76}, - []byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62}, - []byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}}, - { - []byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07}, - []byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2}, - []byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}}, - { - []byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F}, - []byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA}, - []byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}}, - { - []byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7}, - []byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92}, - []byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}}, - { - []byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF}, - []byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A}, - []byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}}, - { - []byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6}, - []byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2}, - []byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}}, - { - []byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF}, - []byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A}, - []byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}}, - { - []byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, - []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, - []byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}}, - { - []byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E}, - []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, - []byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}}, - { - []byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE}, - []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, - []byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}}, - { - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, - []byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}}, - { - []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}}, - { - []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, - []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - []byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}}, - { - []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10}, - []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, - []byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}}, -} - -func TestCipherEncrypt(t *testing.T) { - for i, tt := range encryptTests { - c, err := NewCipher(tt.key) - if err != nil { - t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err) - continue - } - ct := make([]byte, len(tt.out)) - c.Encrypt(ct, tt.in) - for j, v := range ct { - if v != tt.out[j] { - t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j]) - break - } - } - } -} - -func TestCipherDecrypt(t *testing.T) { - for i, tt := range encryptTests { - c, err := NewCipher(tt.key) - if err != nil { - t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err) - continue - } - pt := make([]byte, len(tt.in)) - c.Decrypt(pt, tt.out) - for j, v := range pt { - if v != tt.in[j] { - t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j]) - break - } - } - } -} - -func TestSaltedCipherKeyLength(t *testing.T) { - if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) { - 
t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0)) - } - - // A 57-byte key. One over the typical blowfish restriction. - key := []byte("012345678901234567890123456789012345678901234567890123456") - if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil { - t.Errorf("NewSaltedCipher with long key, gave error %#v", err) - } -} - -// Test vectors generated with Blowfish from OpenSSH. -var saltedVectors = [][8]byte{ - {0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e}, - {0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12}, - {0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad}, - {0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8}, - {0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8}, - {0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf}, - {0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9}, - {0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38}, - {0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4}, - {0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c}, - {0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5}, - {0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b}, - {0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47}, - {0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2}, - {0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19}, - {0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc}, - {0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93}, - {0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57}, - {0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08}, - {0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03}, - {0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f}, - {0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef}, - {0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71}, - {0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad}, - {0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe}, - {0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13}, - {0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe}, - {0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6}, - {0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6}, - {0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92}, - {0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56}, - {0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee}, -} - -func TestSaltedCipher(t *testing.T) { - var key, salt [32]byte - for i := range key { - key[i] = byte(i) - salt[i] = byte(i + 32) - } - for i, v := range saltedVectors { - c, err := NewSaltedCipher(key[:], salt[:i]) - if err != nil { - t.Fatal(err) - } - var buf [8]byte - c.Encrypt(buf[:], buf[:]) - if v != buf { - t.Errorf("%d: expected %x, got %x", i, v, buf) - } - } -} - -func BenchmarkExpandKeyWithSalt(b *testing.B) { - key := make([]byte, 32) - salt := make([]byte, 16) - c, _ := NewCipher(key) - for i := 0; i < b.N; i++ { - expandKeyWithSalt(key, salt, c) - } -} - -func BenchmarkExpandKey(b *testing.B) { - key := make([]byte, 32) - c, _ := NewCipher(key) - for i := 0; i < b.N; i++ { - ExpandKey(key, c) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 5019658a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. 
-package blowfish - -// The code is a port of Bruce Schneier's C implementation. -// See http://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. -// The key argument should be the Blowfish key, from 1 to 56 bytes. -func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates and returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the key k -// and stores the result in dst. -// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the key k -// and stores the result in dst. -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index 8c5ee4cb..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes.
-// They are the hexadecimal digits of PI; see: -// http://www.schneier.com/code/constants.txt. - -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, 
- 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 
0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context.go deleted file mode 100644 index 66aff7cb..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. 
-// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // DoSomething calls DoSomethingSlow and returns as soon as - // // it returns or ctx.Done is closed. - // func DoSomething(ctx context.Context) (Result, error) { - // c := make(chan Result, 1) - // go func() { c <- DoSomethingSlow(ctx) }() - // select { - // case res := <-c: - // return res, nil - // case <-ctx.Done(): - // return nil, ctx.Err() - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key return the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value.
A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stored using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first.
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - if p, ok := parentCancelCtx(c.Context); ok { - p.mu.Lock() - if p.children != nil { - delete(p.children, c) - } - p.mu.Unlock() - } - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. 
-// -// Canceling this context releases resources associated with the deadline -// timer, so code should call cancel as soon as the operations running in this -// Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(removeFromParent, err) - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with the deadline -// timer, so code should call cancel as soon as the operations running in this -// Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context_test.go deleted file mode 100644 index 82d2494a..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. -type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. 
- pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. - check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. - select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. 
- select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 100*time.Millisecond) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o = otherContext{c} - c, _ = WithTimeout(o, 300*time.Millisecond) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestCanceledTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 200*time.Millisecond) - o := otherContext{c} - c, cancel := WithTimeout(o, 400*time.Millisecond) - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := otherContext{WithValue(Background(), 
k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") - - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 13, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TOOD(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. - for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + timeout/10): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index a6754dc3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - select { - case <-time.After(200 * time.Millisecond): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - // Output: - // context deadline exceeded -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore deleted file mode 100644 index 191a5360..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -_* -*.swp -*.[568] -[568].out diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE deleted file mode 100644 index 545cf2d3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Gocheck - A rich testing framework for Go - -Copyright (c) 2010-2013 Gustavo Niemeyer - -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/README.md deleted file mode 100644 index 0ca9e572..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Instructions -============ - -Install the package with: - - go get gopkg.in/check.v1 - -Import it with: - - import "gopkg.in/check.v1" - -and use _check_ as the package name inside the code. - -For more details, visit the project page: - -* http://labix.org/gocheck - -and the API documentation: - -* https://gopkg.in/check.v1 diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/TODO b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/TODO deleted file mode 100644 index 33498270..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/TODO +++ /dev/null @@ -1,2 +0,0 @@ -- Assert(slice, Contains, item) -- Parallel test support diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go deleted file mode 100644 index 48cb8c81..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package check - -import ( - "fmt" - "runtime" - "time" -) - -var memStats runtime.MemStats - -// testingB is a type passed to Benchmark functions to manage benchmark -// timing and to specify the number of iterations to run. -type timer struct { - start time.Time // Time test or benchmark started - duration time.Duration - N int - bytes int64 - timerOn bool - benchTime time.Duration - // The initial states of memStats.Mallocs and memStats.TotalAlloc. - startAllocs uint64 - startBytes uint64 - // The net total of this test after being run. - netAllocs uint64 - netBytes uint64 -} - -// StartTimer starts timing a test. 
This function is called automatically -// before a benchmark starts, but it can also be used to resume timing after -// a call to StopTimer. -func (c *C) StartTimer() { - if !c.timerOn { - c.start = time.Now() - c.timerOn = true - - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } -} - -// StopTimer stops timing a test. This can be used to pause the timer -// while performing complex initialization that you don't -// want to measure. -func (c *C) StopTimer() { - if c.timerOn { - c.duration += time.Now().Sub(c.start) - c.timerOn = false - runtime.ReadMemStats(&memStats) - c.netAllocs += memStats.Mallocs - c.startAllocs - c.netBytes += memStats.TotalAlloc - c.startBytes - } -} - -// ResetTimer sets the elapsed benchmark time to zero. -// It does not affect whether the timer is running. -func (c *C) ResetTimer() { - if c.timerOn { - c.start = time.Now() - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } - c.duration = 0 - c.netAllocs = 0 - c.netBytes = 0 -} - -// SetBytes records the number of bytes that the benchmark processes -// on each iteration. If this is called in a benchmark it will also -// report MB/s. -func (c *C) SetBytes(n int64) { - c.bytes = n -} - -func (c *C) nsPerOp() int64 { - if c.N <= 0 { - return 0 - } - return c.duration.Nanoseconds() / int64(c.N) -} - -func (c *C) mbPerSec() float64 { - if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { - return 0 - } - return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() -} - -func (c *C) timerString() string { - if c.N <= 0 { - return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) - } - mbs := c.mbPerSec() - mb := "" - if mbs != 0 { - mb = fmt.Sprintf("\t%7.2f MB/s", mbs) - } - nsop := c.nsPerOp() - ns := fmt.Sprintf("%10d ns/op", nsop) - if c.N > 0 && nsop < 100 { - // The format specifiers here make sure that - // the ones digits line up for all three possible formats. - if nsop < 10 { - ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } else { - ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } - } - memStats := "" - if c.benchMem { - allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) - allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) - memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) - } - return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) -} - -func min(x, y int) int { - if x > y { - return y - } - return x -} - -func max(x, y int) int { - if x < y { - return y - } - return x -} - -// roundDown10 rounds a number down to the nearest power of 10. -func roundDown10(n int) int { - var tens = 0 - // tens = floor(log_10(n)) - for n > 10 { - n = n / 10 - tens++ - } - // result = 10^tens - result := 1 - for i := 0; i < tens; i++ { - result *= 10 - } - return result -} - -// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
-func roundUp(n int) int { - base := roundDown10(n) - if n < (2 * base) { - return 2 * base - } - if n < (5 * base) { - return 5 * base - } - return 10 * base -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go deleted file mode 100644 index 4dd827c1..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// These tests verify the test running logic. - -package check_test - -import ( - "time" - . "gopkg.in/check.v1" -) - -var benchmarkS = Suite(&BenchmarkS{}) - -type BenchmarkS struct{} - -func (s *BenchmarkS) TestCountSuite(c *C) { - suitesRun += 1 -} - -func (s *BenchmarkS) TestBasicTestTiming(c *C) { - helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond} - output := String{} - runConf := RunConf{Output: &output, Verbose: true} - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" + - "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestStreamTestTiming(c *C) { - helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond} - output := String{} - runConf := RunConf{Output: &output, Stream: true} - Run(&helper, &runConf) - - expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestBenchmark(c *C) { - helper := FixtureHelper{sleep: 100000} - output := String{} - runConf := RunConf{ - Output: &output, - Benchmark: true, - BenchmarkTime: 10000000, - Filter: "Benchmark1", - } - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Benchmark1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Benchmark1") - c.Check(helper.calls[6], Equals, "TearDownTest") - // ... and more. 
- - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestBenchmarkBytes(c *C) { - helper := FixtureHelper{sleep: 100000} - output := String{} - runConf := RunConf{ - Output: &output, - Benchmark: true, - BenchmarkTime: 10000000, - Filter: "Benchmark2", - } - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestBenchmarkMem(c *C) { - helper := FixtureHelper{sleep: 100000} - output := String{} - runConf := RunConf{ - Output: &output, - Benchmark: true, - BenchmarkMem: true, - BenchmarkTime: 10000000, - Filter: "Benchmark3", - } - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *[0-9]+ B/op\t *[1-9] allocs/op\n" - c.Assert(output.value, Matches, expected) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go deleted file mode 100644 index e55f327c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// These initial tests are for bootstrapping. They verify that we can -// basically use the testing infrastructure itself to check if the test -// system is working. -// -// These tests use will break down the test runner badly in case of -// errors because if they simply fail, we can't be sure the developer -// will ever see anything (because failing means the failing system -// somehow isn't working! :-) -// -// Do not assume *any* internal functionality works as expected besides -// what's actually tested here. - -package check_test - -import ( - "fmt" - "gopkg.in/check.v1" - "strings" -) - -type BootstrapS struct{} - -var boostrapS = check.Suite(&BootstrapS{}) - -func (s *BootstrapS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -func (s *BootstrapS) TestFailedAndFail(c *check.C) { - if c.Failed() { - critical("c.Failed() must be false first!") - } - c.Fail() - if !c.Failed() { - critical("c.Fail() didn't put the test in a failed state!") - } - c.Succeed() -} - -func (s *BootstrapS) TestFailedAndSucceed(c *check.C) { - c.Fail() - c.Succeed() - if c.Failed() { - critical("c.Succeed() didn't put the test back in a non-failed state") - } -} - -func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) { - c.Log("Hello there!") - log := c.GetTestLog() - if log != "Hello there!\n" { - critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log)) - } -} - -func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) { - c.Logf("Hello %v", "there!") - log := c.GetTestLog() - if log != "Hello there!\n" { - critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log)) - } -} - -func (s *BootstrapS) TestRunShowsErrors(c *check.C) { - output := String{} - check.Run(&FailHelper{}, &check.RunConf{Output: &output}) - if strings.Index(output.value, "Expected failure!") == -1 { - critical(fmt.Sprintf("RunWithWriter() output did not contain the "+ - "expected failure! 
Got: %#v", - output.value)) - } -} - -func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) { - output := String{} - check.Run(&SuccessHelper{}, &check.RunConf{Output: &output}) - if strings.Index(output.value, "Expected success!") != -1 { - critical(fmt.Sprintf("RunWithWriter() output contained a successful "+ - "test! Got: %#v", - output.value)) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check.go deleted file mode 100644 index ca8c0f92..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check.go +++ /dev/null @@ -1,945 +0,0 @@ -// Package check is a rich testing extension for Go's testing package. -// -// For details about the project, see: -// -// http://labix.org/gocheck -// -package check - -import ( - "bytes" - "errors" - "fmt" - "io" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -// ----------------------------------------------------------------------- -// Internal type which deals with suite method calling. - -const ( - fixtureKd = iota - testKd -) - -type funcKind int - -const ( - succeededSt = iota - failedSt - skippedSt - panickedSt - fixturePanickedSt - missedSt -) - -type funcStatus int - -// A method value can't reach its own Method structure. -type methodType struct { - reflect.Value - Info reflect.Method -} - -func newMethod(receiver reflect.Value, i int) *methodType { - return &methodType{receiver.Method(i), receiver.Type().Method(i)} -} - -func (method *methodType) PC() uintptr { - return method.Info.Func.Pointer() -} - -func (method *methodType) suiteName() string { - t := method.Info.Type.In(0) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t.Name() -} - -func (method *methodType) String() string { - return method.suiteName() + "." + method.Info.Name -} - -func (method *methodType) matches(re *regexp.Regexp) bool { - return (re.MatchString(method.Info.Name) || - re.MatchString(method.suiteName()) || - re.MatchString(method.String())) -} - -type C struct { - method *methodType - kind funcKind - testName string - status funcStatus - logb *logger - logw io.Writer - done chan *C - reason string - mustFail bool - tempDir *tempDir - benchMem bool - startTime time.Time - timer -} - -func (c *C) stopNow() { - runtime.Goexit() -} - -// logger is a concurrency safe byte.Buffer -type logger struct { - sync.Mutex - writer bytes.Buffer -} - -func (l *logger) Write(buf []byte) (int, error) { - l.Lock() - defer l.Unlock() - return l.writer.Write(buf) -} - -func (l *logger) WriteTo(w io.Writer) (int64, error) { - l.Lock() - defer l.Unlock() - return l.writer.WriteTo(w) -} - -func (l *logger) String() string { - l.Lock() - defer l.Unlock() - return l.writer.String() -} - -// ----------------------------------------------------------------------- -// Handling of temporary files and directories. 
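The temporary-directory plumbing below backs the public C.MkDir helper: a
single root directory is created lazily per suite, MkDir hands out numbered
subdirectories inside it, and everything is removed together once the suite
finishes running. A minimal usage sketch, with the suite, test, and file
names being illustrative rather than taken from this patch:

    import (
        "io/ioutil"
        "path/filepath"

        "gopkg.in/check.v1"
    )

    type MySuite struct{}

    func (s *MySuite) TestWriteConfig(c *check.C) {
        dir := c.MkDir() // removed automatically after the suite runs
        path := filepath.Join(dir, "config.yml")
        err := ioutil.WriteFile(path, []byte("key: value"), 0600)
        c.Assert(err, check.IsNil)
    }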
- -type tempDir struct { - sync.Mutex - path string - counter int -} - -func (td *tempDir) newPath() string { - td.Lock() - defer td.Unlock() - if td.path == "" { - var err error - for i := 0; i != 100; i++ { - path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int()) - if err = os.Mkdir(path, 0700); err == nil { - td.path = path - break - } - } - if td.path == "" { - panic("Couldn't create temporary directory: " + err.Error()) - } - } - result := filepath.Join(td.path, strconv.Itoa(td.counter)) - td.counter += 1 - return result -} - -func (td *tempDir) removeAll() { - td.Lock() - defer td.Unlock() - if td.path != "" { - err := os.RemoveAll(td.path) - if err != nil { - fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) - } - } -} - -// Create a new temporary directory which is automatically removed after -// the suite finishes running. -func (c *C) MkDir() string { - path := c.tempDir.newPath() - if err := os.Mkdir(path, 0700); err != nil { - panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) - } - return path -} - -// ----------------------------------------------------------------------- -// Low-level logging functions. - -func (c *C) log(args ...interface{}) { - c.writeLog([]byte(fmt.Sprint(args...) + "\n")) -} - -func (c *C) logf(format string, args ...interface{}) { - c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) -} - -func (c *C) logNewLine() { - c.writeLog([]byte{'\n'}) -} - -func (c *C) writeLog(buf []byte) { - c.logb.Write(buf) - if c.logw != nil { - c.logw.Write(buf) - } -} - -func hasStringOrError(x interface{}) (ok bool) { - _, ok = x.(fmt.Stringer) - if ok { - return - } - _, ok = x.(error) - return -} - -func (c *C) logValue(label string, value interface{}) { - if label == "" { - if hasStringOrError(value) { - c.logf("... %#v (%q)", value, value) - } else { - c.logf("... %#v", value) - } - } else if value == nil { - c.logf("... %s = nil", label) - } else { - if hasStringOrError(value) { - fv := fmt.Sprintf("%#v", value) - qv := fmt.Sprintf("%q", value) - if fv != qv { - c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) - return - } - } - if s, ok := value.(string); ok && isMultiLine(s) { - c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) - c.logMultiLine(s) - } else { - c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value) - } - } -} - -func (c *C) logMultiLine(s string) { - b := make([]byte, 0, len(s)*2) - i := 0 - n := len(s) - for i < n { - j := i + 1 - for j < n && s[j-1] != '\n' { - j++ - } - b = append(b, "... "...) - b = strconv.AppendQuote(b, s[i:j]) - if j < n { - b = append(b, " +"...) - } - b = append(b, '\n') - i = j - } - c.writeLog(b) -} - -func isMultiLine(s string) bool { - for i := 0; i+1 < len(s); i++ { - if s[i] == '\n' { - return true - } - } - return false -} - -func (c *C) logString(issue string) { - c.log("... ", issue) -} - -func (c *C) logCaller(skip int) { - // This is a bit heavier than it ought to be. - skip += 1 // Our own frame. - pc, callerFile, callerLine, ok := runtime.Caller(skip) - if !ok { - return - } - var testFile string - var testLine int - testFunc := runtime.FuncForPC(c.method.PC()) - if runtime.FuncForPC(pc) != testFunc { - for { - skip += 1 - if pc, file, line, ok := runtime.Caller(skip); ok { - // Note that the test line may be different on - // distinct calls for the same test. Showing - // the "internal" line is helpful when debugging. 
- if runtime.FuncForPC(pc) == testFunc { - testFile, testLine = file, line - break - } - } else { - break - } - } - } - if testFile != "" && (testFile != callerFile || testLine != callerLine) { - c.logCode(testFile, testLine) - } - c.logCode(callerFile, callerLine) -} - -func (c *C) logCode(path string, line int) { - c.logf("%s:%d:", nicePath(path), line) - code, err := printLine(path, line) - if code == "" { - code = "..." // XXX Open the file and take the raw line. - if err != nil { - code += err.Error() - } - } - c.log(indent(code, " ")) -} - -var valueGo = filepath.Join("reflect", "value.go") -var asmGo = filepath.Join("runtime", "asm_") - -func (c *C) logPanic(skip int, value interface{}) { - skip++ // Our own frame. - initialSkip := skip - for ; ; skip++ { - if pc, file, line, ok := runtime.Caller(skip); ok { - if skip == initialSkip { - c.logf("... Panic: %s (PC=0x%X)\n", value, pc) - } - name := niceFuncName(pc) - path := nicePath(file) - if strings.Contains(path, "/gopkg.in/check.v") { - continue - } - if name == "Value.call" && strings.HasSuffix(path, valueGo) { - continue - } - if name == "call16" && strings.Contains(path, asmGo) { - continue - } - c.logf("%s:%d\n in %s", nicePath(file), line, name) - } else { - break - } - } -} - -func (c *C) logSoftPanic(issue string) { - c.log("... Panic: ", issue) -} - -func (c *C) logArgPanic(method *methodType, expectedType string) { - c.logf("... Panic: %s argument should be %s", - niceFuncName(method.PC()), expectedType) -} - -// ----------------------------------------------------------------------- -// Some simple formatting helpers. - -var initWD, initWDErr = os.Getwd() - -func init() { - if initWDErr == nil { - initWD = strings.Replace(initWD, "\\", "/", -1) + "/" - } -} - -func nicePath(path string) string { - if initWDErr == nil { - if strings.HasPrefix(path, initWD) { - return path[len(initWD):] - } - } - return path -} - -func niceFuncPath(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - filename, line := function.FileLine(pc) - return fmt.Sprintf("%s:%d", nicePath(filename), line) - } - return "" -} - -func niceFuncName(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - name := path.Base(function.Name()) - if i := strings.Index(name, "."); i > 0 { - name = name[i+1:] - } - if strings.HasPrefix(name, "(*") { - if i := strings.Index(name, ")"); i > 0 { - name = name[2:i] + name[i+1:] - } - } - if i := strings.LastIndex(name, ".*"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - if i := strings.LastIndex(name, "·"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - return name - } - return "" -} - -// ----------------------------------------------------------------------- -// Result tracker to aggregate call results. - -type Result struct { - Succeeded int - Failed int - Skipped int - Panicked int - FixturePanicked int - ExpectedFailures int - Missed int // Not even tried to run, related to a panic in the fixture. - RunError error // Houston, we've got a problem. 
- WorkDir string // If KeepWorkDir is true -} - -type resultTracker struct { - result Result - _lastWasProblem bool - _waiting int - _missed int - _expectChan chan *C - _doneChan chan *C - _stopChan chan bool -} - -func newResultTracker() *resultTracker { - return &resultTracker{_expectChan: make(chan *C), // Synchronous - _doneChan: make(chan *C, 32), // Asynchronous - _stopChan: make(chan bool)} // Synchronous -} - -func (tracker *resultTracker) start() { - go tracker._loopRoutine() -} - -func (tracker *resultTracker) waitAndStop() { - <-tracker._stopChan -} - -func (tracker *resultTracker) expectCall(c *C) { - tracker._expectChan <- c -} - -func (tracker *resultTracker) callDone(c *C) { - tracker._doneChan <- c -} - -func (tracker *resultTracker) _loopRoutine() { - for { - var c *C - if tracker._waiting > 0 { - // Calls still running. Can't stop. - select { - // XXX Reindent this (not now to make diff clear) - case c = <-tracker._expectChan: - tracker._waiting += 1 - case c = <-tracker._doneChan: - tracker._waiting -= 1 - switch c.status { - case succeededSt: - if c.kind == testKd { - if c.mustFail { - tracker.result.ExpectedFailures++ - } else { - tracker.result.Succeeded++ - } - } - case failedSt: - tracker.result.Failed++ - case panickedSt: - if c.kind == fixtureKd { - tracker.result.FixturePanicked++ - } else { - tracker.result.Panicked++ - } - case fixturePanickedSt: - // Track it as missed, since the panic - // was on the fixture, not on the test. - tracker.result.Missed++ - case missedSt: - tracker.result.Missed++ - case skippedSt: - if c.kind == testKd { - tracker.result.Skipped++ - } - } - } - } else { - // No calls. Can stop, but no done calls here. - select { - case tracker._stopChan <- true: - return - case c = <-tracker._expectChan: - tracker._waiting += 1 - case c = <-tracker._doneChan: - panic("Tracker got an unexpected done call.") - } - } - } -} - -// ----------------------------------------------------------------------- -// The underlying suite runner. - -type suiteRunner struct { - suite interface{} - setUpSuite, tearDownSuite *methodType - setUpTest, tearDownTest *methodType - tests []*methodType - tracker *resultTracker - tempDir *tempDir - keepDir bool - output *outputWriter - reportedProblemLast bool - benchTime time.Duration - benchMem bool -} - -type RunConf struct { - Output io.Writer - Stream bool - Verbose bool - Filter string - Benchmark bool - BenchmarkTime time.Duration // Defaults to 1 second - BenchmarkMem bool - KeepWorkDir bool -} - -// Create a new suiteRunner able to run all methods in the given suite. 
-func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { - var conf RunConf - if runConf != nil { - conf = *runConf - } - if conf.Output == nil { - conf.Output = os.Stdout - } - if conf.Benchmark { - conf.Verbose = true - } - - suiteType := reflect.TypeOf(suite) - suiteNumMethods := suiteType.NumMethod() - suiteValue := reflect.ValueOf(suite) - - runner := &suiteRunner{ - suite: suite, - output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), - tracker: newResultTracker(), - benchTime: conf.BenchmarkTime, - benchMem: conf.BenchmarkMem, - tempDir: &tempDir{}, - keepDir: conf.KeepWorkDir, - tests: make([]*methodType, 0, suiteNumMethods), - } - if runner.benchTime == 0 { - runner.benchTime = 1 * time.Second - } - - var filterRegexp *regexp.Regexp - if conf.Filter != "" { - if regexp, err := regexp.Compile(conf.Filter); err != nil { - msg := "Bad filter expression: " + err.Error() - runner.tracker.result.RunError = errors.New(msg) - return runner - } else { - filterRegexp = regexp - } - } - - for i := 0; i != suiteNumMethods; i++ { - method := newMethod(suiteValue, i) - switch method.Info.Name { - case "SetUpSuite": - runner.setUpSuite = method - case "TearDownSuite": - runner.tearDownSuite = method - case "SetUpTest": - runner.setUpTest = method - case "TearDownTest": - runner.tearDownTest = method - default: - prefix := "Test" - if conf.Benchmark { - prefix = "Benchmark" - } - if !strings.HasPrefix(method.Info.Name, prefix) { - continue - } - if filterRegexp == nil || method.matches(filterRegexp) { - runner.tests = append(runner.tests, method) - } - } - } - return runner -} - -// Run all methods in the given suite. -func (runner *suiteRunner) run() *Result { - if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { - runner.tracker.start() - if runner.checkFixtureArgs() { - c := runner.runFixture(runner.setUpSuite, "", nil) - if c == nil || c.status == succeededSt { - for i := 0; i != len(runner.tests); i++ { - c := runner.runTest(runner.tests[i]) - if c.status == fixturePanickedSt { - runner.skipTests(missedSt, runner.tests[i+1:]) - break - } - } - } else if c != nil && c.status == skippedSt { - runner.skipTests(skippedSt, runner.tests) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.runFixture(runner.tearDownSuite, "", nil) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.tracker.waitAndStop() - if runner.keepDir { - runner.tracker.result.WorkDir = runner.tempDir.path - } else { - runner.tempDir.removeAll() - } - } - return &runner.tracker.result -} - -// Create a call object with the given suite method, and fork a -// goroutine with the provided dispatcher for running it. -func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - var logw io.Writer - if runner.output.Stream { - logw = runner.output - } - if logb == nil { - logb = new(logger) - } - c := &C{ - method: method, - kind: kind, - testName: testName, - logb: logb, - logw: logw, - tempDir: runner.tempDir, - done: make(chan *C, 1), - timer: timer{benchTime: runner.benchTime}, - startTime: time.Now(), - benchMem: runner.benchMem, - } - runner.tracker.expectCall(c) - go (func() { - runner.reportCallStarted(c) - defer runner.callDone(c) - dispatcher(c) - })() - return c -} - -// Same as forkCall(), but wait for call to finish before returning. 
-func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - c := runner.forkCall(method, kind, testName, logb, dispatcher) - <-c.done - return c -} - -// Handle a finished call. If there were any panics, update the call status -// accordingly. Then, mark the call as done and report to the tracker. -func (runner *suiteRunner) callDone(c *C) { - value := recover() - if value != nil { - switch v := value.(type) { - case *fixturePanic: - if v.status == skippedSt { - c.status = skippedSt - } else { - c.logSoftPanic("Fixture has panicked (see related PANIC)") - c.status = fixturePanickedSt - } - default: - c.logPanic(1, value) - c.status = panickedSt - } - } - if c.mustFail { - switch c.status { - case failedSt: - c.status = succeededSt - case succeededSt: - c.status = failedSt - c.logString("Error: Test succeeded, but was expected to fail") - c.logString("Reason: " + c.reason) - } - } - - runner.reportCallDone(c) - c.done <- c -} - -// Runs a fixture call synchronously. The fixture will still be run in a -// goroutine like all suite methods, but this method will not return -// while the fixture goroutine is not done, because the fixture must be -// run in a desired order. -func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { - if method != nil { - c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { - c.ResetTimer() - c.StartTimer() - defer c.StopTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - }) - return c - } - return nil -} - -// Run the fixture method with runFixture(), but panic with a fixturePanic{} -// in case the fixture method panics. This makes it easier to track the -// fixture panic together with other call panics within forkTest(). -func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { - if skipped != nil && *skipped { - return nil - } - c := runner.runFixture(method, testName, logb) - if c != nil && c.status != succeededSt { - if skipped != nil { - *skipped = c.status == skippedSt - } - panic(&fixturePanic{c.status, method}) - } - return c -} - -type fixturePanic struct { - status funcStatus - method *methodType -} - -// Run the suite test method, together with the test-specific fixture, -// asynchronously. -func (runner *suiteRunner) forkTest(method *methodType) *C { - testName := method.String() - return runner.forkCall(method, testKd, testName, nil, func(c *C) { - var skipped bool - defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) - defer c.StopTimer() - benchN := 1 - for { - runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) - mt := c.method.Type() - if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { - // Rather than a plain panic, provide a more helpful message when - // the argument type is incorrect. 
- c.status = panickedSt - c.logArgPanic(c.method, "*check.C") - return - } - if strings.HasPrefix(c.method.Info.Name, "Test") { - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - return - } - if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { - panic("unexpected method prefix: " + c.method.Info.Name) - } - - runtime.GC() - c.N = benchN - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - c.StopTimer() - if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { - return - } - perOpN := int(1e9) - if c.nsPerOp() != 0 { - perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) - } - - // Logic taken from the stock testing package: - // - Run more iterations than we think we'll need for a second (1.5x). - // - Don't grow too fast in case we had timing errors previously. - // - Be sure to run at least one more than last time. - benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) - benchN = roundUp(benchN) - - skipped = true // Don't run the deferred one if this panics. - runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) - skipped = false - } - }) -} - -// Same as forkTest(), but wait for the test to finish before returning. -func (runner *suiteRunner) runTest(method *methodType) *C { - c := runner.forkTest(method) - <-c.done - return c -} - -// Helper to mark tests as skipped or missed. A bit heavy for what -// it does, but it enables homogeneous handling of tracking, including -// nice verbose output. -func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { - for _, method := range methods { - runner.runFunc(method, testKd, "", nil, func(c *C) { - c.status = status - }) - } -} - -// Verify if the fixture arguments are *check.C. In case of errors, -// log the error as a panic in the fixture method call, and return false. -func (runner *suiteRunner) checkFixtureArgs() bool { - succeeded := true - argType := reflect.TypeOf(&C{}) - for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { - if method != nil { - mt := method.Type() - if mt.NumIn() != 1 || mt.In(0) != argType { - succeeded = false - runner.runFunc(method, fixtureKd, "", nil, func(c *C) { - c.logArgPanic(method, "*check.C") - c.status = panickedSt - }) - } - } - } - return succeeded -} - -func (runner *suiteRunner) reportCallStarted(c *C) { - runner.output.WriteCallStarted("START", c) -} - -func (runner *suiteRunner) reportCallDone(c *C) { - runner.tracker.callDone(c) - switch c.status { - case succeededSt: - if c.mustFail { - runner.output.WriteCallSuccess("FAIL EXPECTED", c) - } else { - runner.output.WriteCallSuccess("PASS", c) - } - case skippedSt: - runner.output.WriteCallSuccess("SKIP", c) - case failedSt: - runner.output.WriteCallProblem("FAIL", c) - case panickedSt: - runner.output.WriteCallProblem("PANIC", c) - case fixturePanickedSt: - // That's a testKd call reporting that its fixture - // has panicked. The fixture call which caused the - // panic itself was tracked above. We'll report to - // aid debugging. - runner.output.WriteCallProblem("PANIC", c) - case missedSt: - runner.output.WriteCallSuccess("MISS", c) - } -} - -// ----------------------------------------------------------------------- -// Output writer manages atomic output writing according to settings. 
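A worked pass through the benchmark-sizing rule in forkTest above, assuming
the default 1s benchmark time: if the first run (N=1) measures 1ms per
operation, then perOpN = 1e9/1e6 = 1000, min(1000+500, 100*1) = 100,
max(100, 2) = 100, and roundUp(100) = 100, so the second run uses N=100.
If the per-operation cost holds, that run lasts only 0.1s, so a third run
is sized as min(1500, 10000) = 1500, rounded up to 2000; at 1ms per
operation it takes 2s, exceeding benchTime, and the loop stops.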
- -type outputWriter struct { - m sync.Mutex - writer io.Writer - wroteCallProblemLast bool - Stream bool - Verbose bool -} - -func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { - return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} -} - -func (ow *outputWriter) Write(content []byte) (n int, err error) { - ow.m.Lock() - n, err = ow.writer.Write(content) - ow.m.Unlock() - return -} - -func (ow *outputWriter) WriteCallStarted(label string, c *C) { - if ow.Stream { - header := renderCallHeader(label, c, "", "\n") - ow.m.Lock() - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func (ow *outputWriter) WriteCallProblem(label string, c *C) { - var prefix string - if !ow.Stream { - prefix = "\n-----------------------------------" + - "-----------------------------------\n" - } - header := renderCallHeader(label, c, prefix, "\n\n") - ow.m.Lock() - ow.wroteCallProblemLast = true - ow.writer.Write([]byte(header)) - if !ow.Stream { - c.logb.WriteTo(ow.writer) - } - ow.m.Unlock() -} - -func (ow *outputWriter) WriteCallSuccess(label string, c *C) { - if ow.Stream || (ow.Verbose && c.kind == testKd) { - // TODO Use a buffer here. - var suffix string - if c.reason != "" { - suffix = " (" + c.reason + ")" - } - if c.status == succeededSt { - suffix += "\t" + c.timerString() - } - suffix += "\n" - if ow.Stream { - suffix += "\n" - } - header := renderCallHeader(label, c, "", suffix) - ow.m.Lock() - // Resist temptation of using line as prefix above due to race. - if !ow.Stream && ow.wroteCallProblemLast { - header = "\n-----------------------------------" + - "-----------------------------------\n" + - header - } - ow.wroteCallProblemLast = false - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func renderCallHeader(label string, c *C, prefix, suffix string) string { - pc := c.method.PC() - return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), - niceFuncName(pc), suffix) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go deleted file mode 100644 index 871b3252..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go +++ /dev/null @@ -1,207 +0,0 @@ -// This file contains just a few generic helpers which are used by the -// other test files. - -package check_test - -import ( - "flag" - "fmt" - "os" - "regexp" - "runtime" - "testing" - "time" - - "gopkg.in/check.v1" -) - -// We count the number of suites run at least to get a vague hint that the -// test suite is behaving as it should. Otherwise a bug introduced at the -// very core of the system could go unperceived. -const suitesRunExpected = 8 - -var suitesRun int = 0 - -func Test(t *testing.T) { - check.TestingT(t) - if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" { - critical(fmt.Sprintf("Expected %d suites to run rather than %d", - suitesRunExpected, suitesRun)) - } -} - -// ----------------------------------------------------------------------- -// Helper functions. - -// Break down badly. This is used in test cases which can't yet assume -// that the fundamental bits are working. -func critical(error string) { - fmt.Fprintln(os.Stderr, "CRITICAL: "+error) - os.Exit(1) -} - -// Return the file line where it's called. 
-func getMyLine() int { - if _, _, line, ok := runtime.Caller(1); ok { - return line - } - return -1 -} - -// ----------------------------------------------------------------------- -// Helper type implementing a basic io.Writer for testing output. - -// Type implementing the io.Writer interface for analyzing output. -type String struct { - value string -} - -// The only function required by the io.Writer interface. Will append -// written data to the String.value string. -func (s *String) Write(p []byte) (n int, err error) { - s.value += string(p) - return len(p), nil -} - -// Trivial wrapper to test errors happening on a different file -// than the test itself. -func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) { - return c.Check(obtained, check.Equals, expected), getMyLine() -} - -// ----------------------------------------------------------------------- -// Helper suite for testing basic fail behavior. - -type FailHelper struct { - testLine int -} - -func (s *FailHelper) TestLogAndFail(c *check.C) { - s.testLine = getMyLine() - 1 - c.Log("Expected failure!") - c.Fail() -} - -// ----------------------------------------------------------------------- -// Helper suite for testing basic success behavior. - -type SuccessHelper struct{} - -func (s *SuccessHelper) TestLogAndSucceed(c *check.C) { - c.Log("Expected success!") -} - -// ----------------------------------------------------------------------- -// Helper suite for testing ordering and behavior of fixture. - -type FixtureHelper struct { - calls []string - panicOn string - skip bool - skipOnN int - sleepOn string - sleep time.Duration - bytes int64 -} - -func (s *FixtureHelper) trace(name string, c *check.C) { - s.calls = append(s.calls, name) - if name == s.panicOn { - panic(name) - } - if s.sleep > 0 && s.sleepOn == name { - time.Sleep(s.sleep) - } - if s.skip && s.skipOnN == len(s.calls)-1 { - c.Skip("skipOnN == n") - } -} - -func (s *FixtureHelper) SetUpSuite(c *check.C) { - s.trace("SetUpSuite", c) -} - -func (s *FixtureHelper) TearDownSuite(c *check.C) { - s.trace("TearDownSuite", c) -} - -func (s *FixtureHelper) SetUpTest(c *check.C) { - s.trace("SetUpTest", c) -} - -func (s *FixtureHelper) TearDownTest(c *check.C) { - s.trace("TearDownTest", c) -} - -func (s *FixtureHelper) Test1(c *check.C) { - s.trace("Test1", c) -} - -func (s *FixtureHelper) Test2(c *check.C) { - s.trace("Test2", c) -} - -func (s *FixtureHelper) Benchmark1(c *check.C) { - s.trace("Benchmark1", c) - for i := 0; i < c.N; i++ { - time.Sleep(s.sleep) - } -} - -func (s *FixtureHelper) Benchmark2(c *check.C) { - s.trace("Benchmark2", c) - c.SetBytes(1024) - for i := 0; i < c.N; i++ { - time.Sleep(s.sleep) - } -} - -func (s *FixtureHelper) Benchmark3(c *check.C) { - var x []int64 - s.trace("Benchmark3", c) - for i := 0; i < c.N; i++ { - time.Sleep(s.sleep) - x = make([]int64, 5) - _ = x - } -} - -// ----------------------------------------------------------------------- -// Helper which checks the state of the test and ensures that it matches -// the given expectations. Depends on c.Errorf() working, so shouldn't -// be used to test this one function. - -type expectedState struct { - name string - result interface{} - failed bool - log string -} - -// Verify the state of the test. Note that since this also verifies if -// the test is supposed to be in a failed state, no other checks should -// be done in addition to what is being tested. 
-func checkState(c *check.C, result interface{}, expected *expectedState) { - failed := c.Failed() - c.Succeed() - log := c.GetTestLog() - matched, matchError := regexp.MatchString("^"+expected.log+"$", log) - if matchError != nil { - c.Errorf("Error in matching expression used in testing %s", - expected.name) - } else if !matched { - c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------", - expected.name, log, expected.log) - } - if result != expected.result { - c.Errorf("%s returned %#v rather than %#v", - expected.name, result, expected.result) - } - if failed != expected.failed { - if failed { - c.Errorf("%s has failed when it shouldn't", expected.name) - } else { - c.Errorf("%s has not failed when it should", expected.name) - } - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go deleted file mode 100644 index bac33872..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go +++ /dev/null @@ -1,458 +0,0 @@ -package check - -import ( - "fmt" - "reflect" - "regexp" -) - -// ----------------------------------------------------------------------- -// CommentInterface and Commentf helper, to attach extra information to checks. - -type comment struct { - format string - args []interface{} -} - -// Commentf returns an infomational value to use with Assert or Check calls. -// If the checker test fails, the provided arguments will be passed to -// fmt.Sprintf, and will be presented next to the logged failure. -// -// For example: -// -// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) -// -// Note that if the comment is constant, a better option is to -// simply use a normal comment right above or next to the line, as -// it will also get printed with any errors: -// -// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) -// -func Commentf(format string, args ...interface{}) CommentInterface { - return &comment{format, args} -} - -// CommentInterface must be implemented by types that attach extra -// information to failed checks. See the Commentf function for details. -type CommentInterface interface { - CheckCommentString() string -} - -func (c *comment) CheckCommentString() string { - return fmt.Sprintf(c.format, c.args...) -} - -// ----------------------------------------------------------------------- -// The Checker interface. - -// The Checker interface must be provided by checkers used with -// the Assert and Check verification methods. -type Checker interface { - Info() *CheckerInfo - Check(params []interface{}, names []string) (result bool, error string) -} - -// See the Checker interface. -type CheckerInfo struct { - Name string - Params []string -} - -func (info *CheckerInfo) Info() *CheckerInfo { - return info -} - -// ----------------------------------------------------------------------- -// Not checker logic inverter. - -// The Not checker inverts the logic of the provided checker. The -// resulting checker will succeed where the original one failed, and -// vice-versa. 
-// -// For example: -// -// c.Assert(a, Not(Equals), b) -// -func Not(checker Checker) Checker { - return ¬Checker{checker} -} - -type notChecker struct { - sub Checker -} - -func (checker *notChecker) Info() *CheckerInfo { - info := *checker.sub.Info() - info.Name = "Not(" + info.Name + ")" - return &info -} - -func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { - result, error = checker.sub.Check(params, names) - result = !result - return -} - -// ----------------------------------------------------------------------- -// IsNil checker. - -type isNilChecker struct { - *CheckerInfo -} - -// The IsNil checker tests whether the obtained value is nil. -// -// For example: -// -// c.Assert(err, IsNil) -// -var IsNil Checker = &isNilChecker{ - &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, -} - -func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return isNil(params[0]), "" -} - -func isNil(obtained interface{}) (result bool) { - if obtained == nil { - result = true - } else { - switch v := reflect.ValueOf(obtained); v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - } - return -} - -// ----------------------------------------------------------------------- -// NotNil checker. Alias for Not(IsNil), since it's so common. - -type notNilChecker struct { - *CheckerInfo -} - -// The NotNil checker verifies that the obtained value is not nil. -// -// For example: -// -// c.Assert(iface, NotNil) -// -// This is an alias for Not(IsNil), made available since it's a -// fairly common check. -// -var NotNil Checker = ¬NilChecker{ - &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, -} - -func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return !isNil(params[0]), "" -} - -// ----------------------------------------------------------------------- -// Equals checker. - -type equalsChecker struct { - *CheckerInfo -} - -// The Equals checker verifies that the obtained value is equal to -// the expected value, according to usual Go semantics for ==. -// -// For example: -// -// c.Assert(value, Equals, 42) -// -var Equals Checker = &equalsChecker{ - &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, -} - -func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { - defer func() { - if v := recover(); v != nil { - result = false - error = fmt.Sprint(v) - } - }() - return params[0] == params[1], "" -} - -// ----------------------------------------------------------------------- -// DeepEquals checker. - -type deepEqualsChecker struct { - *CheckerInfo -} - -// The DeepEquals checker verifies that the obtained value is deep-equal to -// the expected value. The check will work correctly even when facing -// slices, interfaces, and values of different types (which always fail -// the test). -// -// For example: -// -// c.Assert(value, DeepEquals, 42) -// c.Assert(array, DeepEquals, []string{"hi", "there"}) -// -var DeepEquals Checker = &deepEqualsChecker{ - &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, -} - -func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { - return reflect.DeepEqual(params[0], params[1]), "" -} - -// ----------------------------------------------------------------------- -// HasLen checker. 
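The practical difference between the Equals and DeepEquals checkers above
shows up with pointers: Equals applies Go's == (identity comparison for
struct pointers), while DeepEquals compares the pointed-to contents, as the
checker tests later in this patch confirm. A short sketch, assuming c is
the *check.C of an enclosing test, with an illustrative type name:

    type point struct{ x int }

    p1, p2 := &point{1}, &point{1}
    c.Assert(p1, check.Not(check.Equals), p2) // distinct pointers, == is false
    c.Assert(p1, check.DeepEquals, p2)        // same contents, deep-equal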
- -type hasLenChecker struct { - *CheckerInfo -} - -// The HasLen checker verifies that the obtained value has the -// provided length. In many cases this is superior to using Equals -// in conjuction with the len function because in case the check -// fails the value itself will be printed, instead of its length, -// providing more details for figuring the problem. -// -// For example: -// -// c.Assert(list, HasLen, 5) -// -var HasLen Checker = &hasLenChecker{ - &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, -} - -func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { - n, ok := params[1].(int) - if !ok { - return false, "n must be an int" - } - value := reflect.ValueOf(params[0]) - switch value.Kind() { - case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: - default: - return false, "obtained value type has no length" - } - return value.Len() == n, "" -} - -// ----------------------------------------------------------------------- -// ErrorMatches checker. - -type errorMatchesChecker struct { - *CheckerInfo -} - -// The ErrorMatches checker verifies that the error value -// is non nil and matches the regular expression provided. -// -// For example: -// -// c.Assert(err, ErrorMatches, "perm.*denied") -// -var ErrorMatches Checker = errorMatchesChecker{ - &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, -} - -func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { - if params[0] == nil { - return false, "Error value is nil" - } - err, ok := params[0].(error) - if !ok { - return false, "Value is not an error" - } - params[0] = err.Error() - names[0] = "error" - return matches(params[0], params[1]) -} - -// ----------------------------------------------------------------------- -// Matches checker. - -type matchesChecker struct { - *CheckerInfo -} - -// The Matches checker verifies that the string provided as the obtained -// value (or the string resulting from obtained.String()) matches the -// regular expression provided. -// -// For example: -// -// c.Assert(err, Matches, "perm.*denied") -// -var Matches Checker = &matchesChecker{ - &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, -} - -func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { - return matches(params[0], params[1]) -} - -func matches(value, regex interface{}) (result bool, error string) { - reStr, ok := regex.(string) - if !ok { - return false, "Regex must be a string" - } - valueStr, valueIsStr := value.(string) - if !valueIsStr { - if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { - valueStr, valueIsStr = valueWithStr.String(), true - } - } - if valueIsStr { - matches, err := regexp.MatchString("^"+reStr+"$", valueStr) - if err != nil { - return false, "Can't compile regex: " + err.Error() - } - return matches, "" - } - return false, "Obtained value is not a string and has no .String()" -} - -// ----------------------------------------------------------------------- -// Panics checker. - -type panicsChecker struct { - *CheckerInfo -} - -// The Panics checker verifies that calling the provided zero-argument -// function will cause a panic which is deep-equal to the provided value. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). 
-// -// -var Panics Checker = &panicsChecker{ - &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, -} - -func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. - if error != "" { - return - } - params[0] = recover() - names[0] = "panic" - result = reflect.DeepEqual(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -type panicMatchesChecker struct { - *CheckerInfo -} - -// The PanicMatches checker verifies that calling the provided zero-argument -// function will cause a panic with an error value matching -// the regular expression provided. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). -// -// -var PanicMatches Checker = &panicMatchesChecker{ - &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, -} - -func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. - if errmsg != "" { - return - } - obtained := recover() - names[0] = "panic" - if e, ok := obtained.(error); ok { - params[0] = e.Error() - } else if _, ok := obtained.(string); ok { - params[0] = obtained - } else { - errmsg = "Panic value is not a string or an error" - return - } - result, errmsg = matches(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -// ----------------------------------------------------------------------- -// FitsTypeOf checker. - -type fitsTypeChecker struct { - *CheckerInfo -} - -// The FitsTypeOf checker verifies that the obtained value is -// assignable to a variable with the same type as the provided -// sample value. -// -// For example: -// -// c.Assert(value, FitsTypeOf, int64(0)) -// c.Assert(value, FitsTypeOf, os.Error(nil)) -// -var FitsTypeOf Checker = &fitsTypeChecker{ - &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, -} - -func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - sample := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !sample.IsValid() { - return false, "Invalid sample value" - } - return obtained.Type().AssignableTo(sample.Type()), "" -} - -// ----------------------------------------------------------------------- -// Implements checker. - -type implementsChecker struct { - *CheckerInfo -} - -// The Implements checker verifies that the obtained value -// implements the interface specified via a pointer to an interface -// variable. 
-// -// For example: -// -// var e os.Error -// c.Assert(err, Implements, &e) -// -var Implements Checker = &implementsChecker{ - &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, -} - -func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - ifaceptr := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { - return false, "ifaceptr should be a pointer to an interface variable" - } - return obtained.Type().Implements(ifaceptr.Elem().Type()), "" -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go deleted file mode 100644 index 5c697474..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go +++ /dev/null @@ -1,272 +0,0 @@ -package check_test - -import ( - "errors" - "gopkg.in/check.v1" - "reflect" - "runtime" -) - -type CheckersS struct{} - -var _ = check.Suite(&CheckersS{}) - -func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) { - info := checker.Info() - if info.Name != name { - c.Fatalf("Got name %s, expected %s", info.Name, name) - } - if !reflect.DeepEqual(info.Params, paramNames) { - c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames) - } -} - -func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) { - info := checker.Info() - if len(params) != len(info.Params) { - c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params)) - } - names := append([]string{}, info.Params...) 
- result_, error_ := checker.Check(params, names) - if result_ != result || error_ != error { - c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)", - info.Name, params, result_, error_, result, error) - } - return params, names -} - -func (s *CheckersS) TestComment(c *check.C) { - bug := check.Commentf("a %d bc", 42) - comment := bug.CheckCommentString() - if comment != "a 42 bc" { - c.Fatalf("Commentf returned %#v", comment) - } -} - -func (s *CheckersS) TestIsNil(c *check.C) { - testInfo(c, check.IsNil, "IsNil", []string{"value"}) - - testCheck(c, check.IsNil, true, "", nil) - testCheck(c, check.IsNil, false, "", "a") - - testCheck(c, check.IsNil, true, "", (chan int)(nil)) - testCheck(c, check.IsNil, false, "", make(chan int)) - testCheck(c, check.IsNil, true, "", (error)(nil)) - testCheck(c, check.IsNil, false, "", errors.New("")) - testCheck(c, check.IsNil, true, "", ([]int)(nil)) - testCheck(c, check.IsNil, false, "", make([]int, 1)) - testCheck(c, check.IsNil, false, "", int(0)) -} - -func (s *CheckersS) TestNotNil(c *check.C) { - testInfo(c, check.NotNil, "NotNil", []string{"value"}) - - testCheck(c, check.NotNil, false, "", nil) - testCheck(c, check.NotNil, true, "", "a") - - testCheck(c, check.NotNil, false, "", (chan int)(nil)) - testCheck(c, check.NotNil, true, "", make(chan int)) - testCheck(c, check.NotNil, false, "", (error)(nil)) - testCheck(c, check.NotNil, true, "", errors.New("")) - testCheck(c, check.NotNil, false, "", ([]int)(nil)) - testCheck(c, check.NotNil, true, "", make([]int, 1)) -} - -func (s *CheckersS) TestNot(c *check.C) { - testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"}) - - testCheck(c, check.Not(check.IsNil), false, "", nil) - testCheck(c, check.Not(check.IsNil), true, "", "a") -} - -type simpleStruct struct { - i int -} - -func (s *CheckersS) TestEquals(c *check.C) { - testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"}) - - // The simplest. - testCheck(c, check.Equals, true, "", 42, 42) - testCheck(c, check.Equals, false, "", 42, 43) - - // Different native types. - testCheck(c, check.Equals, false, "", int32(42), int64(42)) - - // With nil. - testCheck(c, check.Equals, false, "", 42, nil) - - // Slices - testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2}) - - // Struct values - testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1}) - testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2}) - - // Struct pointers - testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1}) - testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2}) -} - -func (s *CheckersS) TestDeepEquals(c *check.C) { - testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"}) - - // The simplest. - testCheck(c, check.DeepEquals, true, "", 42, 42) - testCheck(c, check.DeepEquals, false, "", 42, 43) - - // Different native types. - testCheck(c, check.DeepEquals, false, "", int32(42), int64(42)) - - // With nil. 
- testCheck(c, check.DeepEquals, false, "", 42, nil) - - // Slices - testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2}) - testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3}) - - // Struct values - testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1}) - testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2}) - - // Struct pointers - testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1}) - testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2}) -} - -func (s *CheckersS) TestHasLen(c *check.C) { - testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"}) - - testCheck(c, check.HasLen, true, "", "abcd", 4) - testCheck(c, check.HasLen, true, "", []int{1, 2}, 2) - testCheck(c, check.HasLen, false, "", []int{1, 2}, 3) - - testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2") - testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2) -} - -func (s *CheckersS) TestErrorMatches(c *check.C) { - testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"}) - - testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error") - testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error") - testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error") - testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or") - - // Verify params mutation - params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error") - c.Assert(params[0], check.Equals, "some error") - c.Assert(names[0], check.Equals, "error") -} - -func (s *CheckersS) TestMatches(c *check.C) { - testInfo(c, check.Matches, "Matches", []string{"value", "regex"}) - - // Simple matching - testCheck(c, check.Matches, true, "", "abc", "abc") - testCheck(c, check.Matches, true, "", "abc", "a.c") - - // Must match fully - testCheck(c, check.Matches, false, "", "abc", "ab") - testCheck(c, check.Matches, false, "", "abc", "bc") - - // String()-enabled values accepted - testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c") - testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d") - - // Some error conditions. - testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c") - testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c") -} - -func (s *CheckersS) TestPanics(c *check.C) { - testInfo(c, check.Panics, "Panics", []string{"function", "expected"}) - - // Some errors. - testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM") - testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM") - - // Plain strings. - testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM") - testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM") - testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM") - - // Error values. 
- testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM")) - testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) - - type deep struct{ i int } - // Deep value - testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99}) - - // Verify params/names mutation - params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) - c.Assert(params[0], check.ErrorMatches, "KABOOM") - c.Assert(names[0], check.Equals, "panic") - - // Verify a nil panic - testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil) - testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE") -} - -func (s *CheckersS) TestPanicMatches(c *check.C) { - testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"}) - - // Error matching. - testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M") - testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M") - - // Some errors. - testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM") - testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM") - - // Plain strings. - testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M") - testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM") - testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M") - - // Verify params/names mutation - params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM") - c.Assert(params[0], check.Equals, "KABOOM") - c.Assert(names[0], check.Equals, "panic") - - // Verify a nil panic - testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "") -} - -func (s *CheckersS) TestFitsTypeOf(c *check.C) { - testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"}) - - // Basic types - testCheck(c, check.FitsTypeOf, true, "", 1, 0) - testCheck(c, check.FitsTypeOf, false, "", 1, int64(0)) - - // Aliases - testCheck(c, check.FitsTypeOf, false, "", 1, errors.New("")) - testCheck(c, check.FitsTypeOf, false, "", "error", errors.New("")) - testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New("")) - - // Structures - testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{}) - testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{}) - testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{}) - testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{}) - - // Some bad values - testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil)) - testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0) -} - -func (s *CheckersS) TestImplements(c *check.C) { - testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"}) - - var e error - var re runtime.Error - testCheck(c, check.Implements, true, "", errors.New(""), &e) - testCheck(c, check.Implements, false, "", errors.New(""), &re) - - // Some bad values - testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, errors.New("")) - testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil)) - testCheck(c, check.Implements, false, "", interface{}(nil), &e) -} diff 
--git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go deleted file mode 100644 index 0e6cfe0f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package check - -func PrintLine(filename string, line int) (string, error) { - return printLine(filename, line) -} - -func Indent(s, with string) string { - return indent(s, with) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go deleted file mode 100644 index 2bff9e16..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go +++ /dev/null @@ -1,484 +0,0 @@ -// Tests for the behavior of the test fixture system. - -package check_test - -import ( - . "gopkg.in/check.v1" -) - -// ----------------------------------------------------------------------- -// Fixture test suite. - -type FixtureS struct{} - -var fixtureS = Suite(&FixtureS{}) - -func (s *FixtureS) TestCountSuite(c *C) { - suitesRun += 1 -} - -// ----------------------------------------------------------------------- -// Basic fixture ordering verification. - -func (s *FixtureS) TestOrder(c *C) { - helper := FixtureHelper{} - Run(&helper, nil) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) -} - -// ----------------------------------------------------------------------- -// Check the behavior when panics occur within tests and fixtures. - -func (s *FixtureS) TestPanicOnTest(c *C) { - helper := FixtureHelper{panicOn: "Test1"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" + - "\\.\\.\\. Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.Test1\n" + - "(.|\n)*$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnSetUpTest(c *C) { - helper := FixtureHelper{panicOn: "SetUpTest"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "TearDownTest") - c.Check(helper.calls[3], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 4) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper\\.SetUpTest\n\n" + - "\\.\\.\\. 
Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.SetUpTest\n" + - "(.|\n)*" + - "\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper\\.Test1\n\n" + - "\\.\\.\\. Panic: Fixture has panicked " + - "\\(see related PANIC\\)\n$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnTearDownTest(c *C) { - helper := FixtureHelper{panicOn: "TearDownTest"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 5) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper.TearDownTest\n\n" + - "\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.TearDownTest\n" + - "(.|\n)*" + - "\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper\\.Test1\n\n" + - "\\.\\.\\. Panic: Fixture has panicked " + - "\\(see related PANIC\\)\n$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnSetUpSuite(c *C) { - helper := FixtureHelper{panicOn: "SetUpSuite"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 2) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper.SetUpSuite\n\n" + - "\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.SetUpSuite\n" + - "(.|\n)*$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnTearDownSuite(c *C) { - helper := FixtureHelper{panicOn: "TearDownSuite"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper.TearDownSuite\n\n" + - "\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.TearDownSuite\n" + - "(.|\n)*$" - - c.Check(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// A wrong argument on a test or fixture will produce a nice error. 
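For contrast with the deliberately wrong signatures exercised in the cases below, a correctly shaped check.v1 fixture suite looks roughly like this sketch (the suite and field names are hypothetical; "check" is the imported gopkg.in/check.v1 package). Every fixture hook and every Test* method takes exactly one *check.C argument:

type FilesSuite struct {
	workdir string // shared state prepared by the fixture hooks
}

func (s *FilesSuite) SetUpSuite(c *check.C)    { s.workdir = c.MkDir() } // runs once, before all tests
func (s *FilesSuite) SetUpTest(c *check.C)     {}                        // runs before each test
func (s *FilesSuite) TearDownTest(c *check.C)  {}                        // runs after each test
func (s *FilesSuite) TearDownSuite(c *check.C) {}                        // runs once, after all tests

func (s *FilesSuite) TestWorkdirSet(c *check.C) {
	c.Assert(s.workdir, check.Not(check.Equals), "")
}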
- -func (s *FixtureS) TestPanicOnWrongTestArg(c *C) { - helper := WrongTestArgHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "TearDownTest") - c.Check(helper.calls[3], Equals, "SetUpTest") - c.Check(helper.calls[4], Equals, "Test2") - c.Check(helper.calls[5], Equals, "TearDownTest") - c.Check(helper.calls[6], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 7) - - expected := "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongTestArgHelper\\.Test1\n\n" + - "\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) { - helper := WrongSetUpTestArgHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpTestArgHelper\\.SetUpTest\n\n" + - "\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) { - helper := WrongSetUpSuiteArgHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" + - "\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Nice errors also when tests or fixture have wrong arg count. - -func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) { - helper := WrongTestArgCountHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "TearDownTest") - c.Check(helper.calls[3], Equals, "SetUpTest") - c.Check(helper.calls[4], Equals, "Test2") - c.Check(helper.calls[5], Equals, "TearDownTest") - c.Check(helper.calls[6], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 7) - - expected := "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongTestArgCountHelper\\.Test1\n\n" + - "\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) { - helper := WrongSetUpTestArgCountHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" + - "\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) { - helper := WrongSetUpSuiteArgCountHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" + - "\\.\\.\\. 
Panic: WrongSetUpSuiteArgCountHelper" + - "\\.SetUpSuite argument should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Helper test suites with wrong function arguments. - -type WrongTestArgHelper struct { - FixtureHelper -} - -func (s *WrongTestArgHelper) Test1(t int) { -} - -type WrongSetUpTestArgHelper struct { - FixtureHelper -} - -func (s *WrongSetUpTestArgHelper) SetUpTest(t int) { -} - -type WrongSetUpSuiteArgHelper struct { - FixtureHelper -} - -func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) { -} - -type WrongTestArgCountHelper struct { - FixtureHelper -} - -func (s *WrongTestArgCountHelper) Test1(c *C, i int) { -} - -type WrongSetUpTestArgCountHelper struct { - FixtureHelper -} - -func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) { -} - -type WrongSetUpSuiteArgCountHelper struct { - FixtureHelper -} - -func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) { -} - -// ----------------------------------------------------------------------- -// Ensure fixture doesn't run without tests. - -type NoTestsHelper struct { - hasRun bool -} - -func (s *NoTestsHelper) SetUpSuite(c *C) { - s.hasRun = true -} - -func (s *NoTestsHelper) TearDownSuite(c *C) { - s.hasRun = true -} - -func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) { - helper := NoTestsHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.hasRun, Equals, false) -} - -// ----------------------------------------------------------------------- -// Verify that checks and assertions work correctly inside the fixture. - -type FixtureCheckHelper struct { - fail string - completed bool -} - -func (s *FixtureCheckHelper) SetUpSuite(c *C) { - switch s.fail { - case "SetUpSuiteAssert": - c.Assert(false, Equals, true) - case "SetUpSuiteCheck": - c.Check(false, Equals, true) - } - s.completed = true -} - -func (s *FixtureCheckHelper) SetUpTest(c *C) { - switch s.fail { - case "SetUpTestAssert": - c.Assert(false, Equals, true) - case "SetUpTestCheck": - c.Check(false, Equals, true) - } - s.completed = true -} - -func (s *FixtureCheckHelper) Test(c *C) { - // Do nothing. -} - -func (s *FixtureS) TestSetUpSuiteCheck(c *C) { - helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Matches, - "\n---+\n"+ - "FAIL: fixture_test\\.go:[0-9]+: "+ - "FixtureCheckHelper\\.SetUpSuite\n\n"+ - "fixture_test\\.go:[0-9]+:\n"+ - " c\\.Check\\(false, Equals, true\\)\n"+ - "\\.+ obtained bool = false\n"+ - "\\.+ expected bool = true\n\n") - c.Assert(helper.completed, Equals, true) -} - -func (s *FixtureS) TestSetUpSuiteAssert(c *C) { - helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Matches, - "\n---+\n"+ - "FAIL: fixture_test\\.go:[0-9]+: "+ - "FixtureCheckHelper\\.SetUpSuite\n\n"+ - "fixture_test\\.go:[0-9]+:\n"+ - " c\\.Assert\\(false, Equals, true\\)\n"+ - "\\.+ obtained bool = false\n"+ - "\\.+ expected bool = true\n\n") - c.Assert(helper.completed, Equals, false) -} - -// ----------------------------------------------------------------------- -// Verify that logging within SetUpTest() persists within the test log itself. 
- -type FixtureLogHelper struct { - c *C -} - -func (s *FixtureLogHelper) SetUpTest(c *C) { - s.c = c - c.Log("1") -} - -func (s *FixtureLogHelper) Test(c *C) { - c.Log("2") - s.c.Log("3") - c.Log("4") - c.Fail() -} - -func (s *FixtureLogHelper) TearDownTest(c *C) { - s.c.Log("5") -} - -func (s *FixtureS) TestFixtureLogging(c *C) { - helper := FixtureLogHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Matches, - "\n---+\n"+ - "FAIL: fixture_test\\.go:[0-9]+: "+ - "FixtureLogHelper\\.Test\n\n"+ - "1\n2\n3\n4\n5\n") -} - -// ----------------------------------------------------------------------- -// Skip() within fixture methods. - -func (s *FixtureS) TestSkipSuite(c *C) { - helper := FixtureHelper{skip: true, skipOnN: 0} - output := String{} - result := Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Equals, "") - c.Assert(helper.calls[0], Equals, "SetUpSuite") - c.Assert(helper.calls[1], Equals, "TearDownSuite") - c.Assert(len(helper.calls), Equals, 2) - c.Assert(result.Skipped, Equals, 2) -} - -func (s *FixtureS) TestSkipTest(c *C) { - helper := FixtureHelper{skip: true, skipOnN: 1} - output := String{} - result := Run(&helper, &RunConf{Output: &output}) - c.Assert(helper.calls[0], Equals, "SetUpSuite") - c.Assert(helper.calls[1], Equals, "SetUpTest") - c.Assert(helper.calls[2], Equals, "SetUpTest") - c.Assert(helper.calls[3], Equals, "Test2") - c.Assert(helper.calls[4], Equals, "TearDownTest") - c.Assert(helper.calls[5], Equals, "TearDownSuite") - c.Assert(len(helper.calls), Equals, 6) - c.Assert(result.Skipped, Equals, 1) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go deleted file mode 100644 index 8ecf7915..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go +++ /dev/null @@ -1,335 +0,0 @@ -// These tests check that the foundations of gocheck are working properly. -// They already assume that fundamental failing is working already, though, -// since this was tested in bootstrap_test.go. Even then, some care may -// still have to be taken when using external functions, since they should -// of course not rely on functionality tested here. - -package check_test - -import ( - "fmt" - "gopkg.in/check.v1" - "log" - "os" - "regexp" - "strings" -) - -// ----------------------------------------------------------------------- -// Foundation test suite. - -type FoundationS struct{} - -var foundationS = check.Suite(&FoundationS{}) - -func (s *FoundationS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -func (s *FoundationS) TestErrorf(c *check.C) { - // Do not use checkState() here. It depends on Errorf() working. - expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c.Errorf(\"Error %%v!\", \"message\")\n"+ - "... Error: Error message!\n\n", - getMyLine()+1) - c.Errorf("Error %v!", "message") - failed := c.Failed() - c.Succeed() - if log := c.GetTestLog(); log != expectedLog { - c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog) - c.Fail() - } - if !failed { - c.Logf("Errorf() didn't put the test in a failed state") - c.Fail() - } -} - -func (s *FoundationS) TestError(c *check.C) { - expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c\\.Error\\(\"Error \", \"message!\"\\)\n"+ - "\\.\\.\\. 
Error: Error message!\n\n", - getMyLine()+1) - c.Error("Error ", "message!") - checkState(c, nil, - &expectedState{ - name: "Error(`Error `, `message!`)", - failed: true, - log: expectedLog, - }) -} - -func (s *FoundationS) TestFailNow(c *check.C) { - defer (func() { - if !c.Failed() { - c.Error("FailNow() didn't fail the test") - } else { - c.Succeed() - if c.GetTestLog() != "" { - c.Error("Something got logged:\n" + c.GetTestLog()) - } - } - })() - - c.FailNow() - c.Log("FailNow() didn't stop the test") -} - -func (s *FoundationS) TestSucceedNow(c *check.C) { - defer (func() { - if c.Failed() { - c.Error("SucceedNow() didn't succeed the test") - } - if c.GetTestLog() != "" { - c.Error("Something got logged:\n" + c.GetTestLog()) - } - })() - - c.Fail() - c.SucceedNow() - c.Log("SucceedNow() didn't stop the test") -} - -func (s *FoundationS) TestFailureHeader(c *check.C) { - output := String{} - failHelper := FailHelper{} - check.Run(&failHelper, &check.RunConf{Output: &output}) - header := fmt.Sprintf(""+ - "\n-----------------------------------"+ - "-----------------------------------\n"+ - "FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n", - failHelper.testLine) - if strings.Index(output.value, header) == -1 { - c.Errorf(""+ - "Failure didn't print a proper header.\n"+ - "... Got:\n%s... Expected something with:\n%s", - output.value, header) - } -} - -func (s *FoundationS) TestFatal(c *check.C) { - var line int - defer (func() { - if !c.Failed() { - c.Error("Fatal() didn't fail the test") - } else { - c.Succeed() - expected := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c.Fatal(\"Die \", \"now!\")\n"+ - "... Error: Die now!\n\n", - line) - if c.GetTestLog() != expected { - c.Error("Incorrect log:", c.GetTestLog()) - } - } - })() - - line = getMyLine() + 1 - c.Fatal("Die ", "now!") - c.Log("Fatal() didn't stop the test") -} - -func (s *FoundationS) TestFatalf(c *check.C) { - var line int - defer (func() { - if !c.Failed() { - c.Error("Fatalf() didn't fail the test") - } else { - c.Succeed() - expected := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c.Fatalf(\"Die %%s!\", \"now\")\n"+ - "... Error: Die now!\n\n", - line) - if c.GetTestLog() != expected { - c.Error("Incorrect log:", c.GetTestLog()) - } - } - })() - - line = getMyLine() + 1 - c.Fatalf("Die %s!", "now") - c.Log("Fatalf() didn't stop the test") -} - -func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) { - log := fmt.Sprintf(""+ - "foundation_test.go:%d:\n"+ - " result := c.Check\\(10, check.Equals, 20\\)\n"+ - "\\.\\.\\. obtained int = 10\n"+ - "\\.\\.\\. expected int = 20\n\n", - getMyLine()+1) - result := c.Check(10, check.Equals, 20) - checkState(c, result, - &expectedState{ - name: "Check(10, Equals, 20)", - result: false, - failed: true, - log: log, - }) -} - -func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) { - result, line := checkEqualWrapper(c, 10, 20) - testLine := getMyLine() - 1 - log := fmt.Sprintf(""+ - "foundation_test.go:%d:\n"+ - " result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+ - "check_test.go:%d:\n"+ - " return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+ - "\\.\\.\\. obtained int = 10\n"+ - "\\.\\.\\. expected int = 20\n\n", - testLine, line) - checkState(c, result, - &expectedState{ - name: "Check(10, Equals, 20)", - result: false, - failed: true, - log: log, - }) -} - -// ----------------------------------------------------------------------- -// ExpectFailure() inverts the logic of failure. 
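Before the helper suites below: in consumer code, ExpectFailure marks the running test as knowingly broken, so the runner records a failing body as an expected failure and complains if the test unexpectedly passes. A minimal hedged sketch (the test name and brokenFunc are hypothetical):

func (s *FilesSuite) TestKnownBug(c *check.C) {
	c.ExpectFailure("tracked as a known problem")
	c.Check(brokenFunc(), check.Equals, 42) // brokenFunc is hypothetical; its failure here is the expected one
}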
- -type ExpectFailureSucceedHelper struct{} - -func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) { - c.ExpectFailure("It booms!") - c.Error("Boom!") -} - -type ExpectFailureFailHelper struct{} - -func (s *ExpectFailureFailHelper) TestFail(c *check.C) { - c.ExpectFailure("Bug #XYZ") -} - -func (s *FoundationS) TestExpectFailureFail(c *check.C) { - helper := ExpectFailureFailHelper{} - output := String{} - result := check.Run(&helper, &check.RunConf{Output: &output}) - - expected := "" + - "^\n-+\n" + - "FAIL: foundation_test\\.go:[0-9]+:" + - " ExpectFailureFailHelper\\.TestFail\n\n" + - "\\.\\.\\. Error: Test succeeded, but was expected to fail\n" + - "\\.\\.\\. Reason: Bug #XYZ\n$" - - matched, err := regexp.MatchString(expected, output.value) - if err != nil { - c.Error("Bad expression: ", expected) - } else if !matched { - c.Error("ExpectFailure() didn't log properly:\n", output.value) - } - - c.Assert(result.ExpectedFailures, check.Equals, 0) -} - -func (s *FoundationS) TestExpectFailureSucceed(c *check.C) { - helper := ExpectFailureSucceedHelper{} - output := String{} - result := check.Run(&helper, &check.RunConf{Output: &output}) - - c.Assert(output.value, check.Equals, "") - c.Assert(result.ExpectedFailures, check.Equals, 1) -} - -func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) { - helper := ExpectFailureSucceedHelper{} - output := String{} - result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) - - expected := "" + - "FAIL EXPECTED: foundation_test\\.go:[0-9]+:" + - " ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n" - - matched, err := regexp.MatchString(expected, output.value) - if err != nil { - c.Error("Bad expression: ", expected) - } else if !matched { - c.Error("ExpectFailure() didn't log properly:\n", output.value) - } - - c.Assert(result.ExpectedFailures, check.Equals, 1) -} - -// ----------------------------------------------------------------------- -// Skip() allows stopping a test without positive/negative results. - -type SkipTestHelper struct{} - -func (s *SkipTestHelper) TestFail(c *check.C) { - c.Skip("Wrong platform or whatever") - c.Error("Boom!") -} - -func (s *FoundationS) TestSkip(c *check.C) { - helper := SkipTestHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output}) - - if output.value != "" { - c.Error("Skip() logged something:\n", output.value) - } -} - -func (s *FoundationS) TestSkipVerbose(c *check.C) { - helper := SkipTestHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) - - expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" + - " \\(Wrong platform or whatever\\)" - matched, err := regexp.MatchString(expected, output.value) - if err != nil { - c.Error("Bad expression: ", expected) - } else if !matched { - c.Error("Skip() didn't log properly:\n", output.value) - } -} - -// ----------------------------------------------------------------------- -// Check minimum *log.Logger interface provided by *check.C. 
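The minLogger test below pins down the smallest *log.Logger-style surface that *check.C provides. For illustration, a hedged sketch of why that matters in consumer code (the interface and function here are hypothetical):

type minimalLogger interface {
	Output(calldepth int, s string) error
}

func doWork(l minimalLogger) {
	l.Output(1, "doing work") // a *check.C can be passed in directly; Output timestamps into the test log
}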
- -type minLogger interface { - Output(calldepth int, s string) error -} - -func (s *BootstrapS) TestMinLogger(c *check.C) { - var logger minLogger - logger = log.New(os.Stderr, "", 0) - logger = c - logger.Output(0, "Hello there") - expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n` - output := c.GetTestLog() - c.Assert(output, check.Matches, expected) -} - -// ----------------------------------------------------------------------- -// Ensure that suites with embedded types are working fine, including the -// workaround for issue 906. - -type EmbeddedInternalS struct { - called bool -} - -type EmbeddedS struct { - EmbeddedInternalS -} - -var embeddedS = check.Suite(&EmbeddedS{}) - -func (s *EmbeddedS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -func (s *EmbeddedInternalS) TestMethod(c *check.C) { - c.Error("TestMethod() of the embedded type was called!?") -} - -func (s *EmbeddedS) TestMethod(c *check.C) { - // http://code.google.com/p/go/issues/detail?id=906 - c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner? - s.called = true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go deleted file mode 100644 index 4b6c26da..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go +++ /dev/null @@ -1,231 +0,0 @@ -package check - -import ( - "fmt" - "strings" - "time" -) - -// TestName returns the current test name in the form "SuiteName.TestName" -func (c *C) TestName() string { - return c.testName -} - -// ----------------------------------------------------------------------- -// Basic succeeding/failing logic. - -// Failed returns whether the currently running test has already failed. -func (c *C) Failed() bool { - return c.status == failedSt -} - -// Fail marks the currently running test as failed. -// -// Something ought to have been previously logged so the developer can tell -// what went wrong. The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) Fail() { - c.status = failedSt -} - -// FailNow marks the currently running test as failed and stops running it. -// Something ought to have been previously logged so the developer can tell -// what went wrong. The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) FailNow() { - c.Fail() - c.stopNow() -} - -// Succeed marks the currently running test as succeeded, undoing any -// previous failures. -func (c *C) Succeed() { - c.status = succeededSt -} - -// SucceedNow marks the currently running test as succeeded, undoing any -// previous failures, and stops running the test. -func (c *C) SucceedNow() { - c.Succeed() - c.stopNow() -} - -// ExpectFailure informs that the running test is knowingly broken for -// the provided reason. If the test does not fail, an error will be reported -// to raise attention to this fact. This method is useful to temporarily -// disable tests which cover well known problems until a better time to -// fix the problem is found, without forgetting about the fact that a -// failure still exists. -func (c *C) ExpectFailure(reason string) { - if reason == "" { - panic("Missing reason why the test is expected to fail") - } - c.mustFail = true - c.reason = reason -} - -// Skip skips the running test for the provided reason.
If run from within -// SetUpTest, the individual test being set up will be skipped, and if run -// from within SetUpSuite, the whole suite is skipped. -func (c *C) Skip(reason string) { - if reason == "" { - panic("Missing reason why the test is being skipped") - } - c.reason = reason - c.status = skippedSt - c.stopNow() -} - -// ----------------------------------------------------------------------- -// Basic logging. - -// GetTestLog returns the current test error output. -func (c *C) GetTestLog() string { - return c.logb.String() -} - -// Log logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Log(args ...interface{}) { - c.log(args...) -} - -// Logf logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprintf. -func (c *C) Logf(format string, args ...interface{}) { - c.logf(format, args...) -} - -// Output enables *C to be used as a logger in functions that require only -// the minimum interface of *log.Logger. -func (c *C) Output(calldepth int, s string) error { - d := time.Now().Sub(c.startTime) - msec := d / time.Millisecond - sec := d / time.Second - min := d / time.Minute - - c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) - return nil -} - -// Error logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Error(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.Fail() -} - -// Errorf logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprintf. -func (c *C) Errorf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprintf("Error: "+format, args...)) - c.logNewLine() - c.Fail() -} - -// Fatal logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprint. -func (c *C) Fatal(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.FailNow() -} - -// Fatalf logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprintf. -func (c *C) Fatalf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...))) - c.logNewLine() - c.FailNow() -} - -// ----------------------------------------------------------------------- -// Generic checks and assertions based on checkers. - -// Check verifies if the first value matches the expected value according -// to the provided checker. If they do not match, an error is logged, the -// test is marked as failed, and the test execution continues. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// Extra arguments provided to the function are logged next to the reported -// problem when the matching fails. -func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { - return c.internalCheck("Check", obtained, checker, args...) -} - -// Assert ensures that the first value matches the expected value according -// to the provided checker.
If they do not match, an error is logged, the -// test is marked as failed, and the test execution stops. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// Extra arguments provided to the function are logged next to the reported -// problem when the matching fails. -func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { - if !c.internalCheck("Assert", obtained, checker, args...) { - c.stopNow() - } -} - -func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { - if checker == nil { - c.logCaller(2) - c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) - c.logString("Oops.. you've provided a nil checker!") - c.logNewLine() - c.Fail() - return false - } - - // If the last argument is a bug info, extract it out. - var comment CommentInterface - if len(args) > 0 { - if c, ok := args[len(args)-1].(CommentInterface); ok { - comment = c - args = args[:len(args)-1] - } - } - - params := append([]interface{}{obtained}, args...) - info := checker.Info() - - if len(params) != len(info.Params) { - names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) - c.logCaller(2) - c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) - c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) - c.logNewLine() - c.Fail() - return false - } - - // Copy since it may be mutated by Check. - names := append([]string{}, info.Params...) - - // Do the actual check. - result, error := checker.Check(params, names) - if !result || error != "" { - c.logCaller(2) - for i := 0; i != len(params); i++ { - c.logValue(names[i], params[i]) - } - if comment != nil { - c.logString(comment.CheckCommentString()) - } - if error != "" { - c.logString(error) - } - c.logNewLine() - c.Fail() - return false - } - return true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go deleted file mode 100644 index 4baa656b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go +++ /dev/null @@ -1,519 +0,0 @@ -// These tests verify the inner workings of the helper methods associated -// with check.T. - -package check_test - -import ( - "gopkg.in/check.v1" - "os" - "reflect" - "runtime" - "sync" -) - -var helpersS = check.Suite(&HelpersS{}) - -type HelpersS struct{} - -func (s *HelpersS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -// ----------------------------------------------------------------------- -// Fake checker and bug info to verify the behavior of Assert() and Check(). - -type MyChecker struct { - info *check.CheckerInfo - params []interface{} - names []string - result bool - error string -} - -func (checker *MyChecker) Info() *check.CheckerInfo { - if checker.info == nil { - return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}} - } - return checker.info -} - -func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) { - rparams := checker.params - rnames := checker.names - checker.params = append([]interface{}{}, params...) - checker.names = append([]string{}, names...) 
- if rparams != nil { - copy(params, rparams) - } - if rnames != nil { - copy(names, rnames) - } - return checker.result, checker.error -} - -type myCommentType string - -func (c myCommentType) CheckCommentString() string { - return string(c) -} - -func myComment(s string) myCommentType { - return myCommentType(s) -} - -// ----------------------------------------------------------------------- -// Ensure a real checker actually works fine. - -func (s *HelpersS) TestCheckerInterface(c *check.C) { - testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} { - return c.Check(1, check.Equals, 1) - }) -} - -// ----------------------------------------------------------------------- -// Tests for Check(), mostly the same as for Assert() following these. - -func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) { - checker := &MyChecker{result: true} - testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} { - return c.Check(1, checker, 2) - }) - if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) { - checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - testHelperSuccess(c, "Check(1, checker)", true, func() interface{} { - return c.Check(1, checker) - }) - if !reflect.DeepEqual(checker.params, []interface{}{1}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestCheckFailWithExpected(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n\n" - testHelperFailure(c, "Check(1, checker, 2)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2) - }) -} - -func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2, myComment("Hello world!")) - }) -} - -func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " // Nice leading comment\\.\n" + - " return c\\.Check\\(1, checker, 2\\) // Hello there\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n\n" - testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, - func() interface{} { - // Nice leading comment. 
- return c.Check(1, checker, 2) // Hello there - }) -} - -func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker\\)\n" + - "\\.+ myvalue int = 1\n\n" - testHelperFailure(c, "Check(1, checker)", false, false, log, - func() interface{} { - return c.Check(1, checker) - }) -} - -func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myvalue int = 1\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Check(1, checker, msg)", false, false, log, - func() interface{} { - return c.Check(1, checker, myComment("Hello world!")) - }) -} - -func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) { - checker := &MyChecker{result: true} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker\\)\n" + - "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + - "\\.+ Wrong number of parameters for MyChecker: " + - "want 3, got 2\n\n" - testHelperFailure(c, "Check(1, checker, !?)", false, false, log, - func() interface{} { - return c.Check(1, checker) - }) -} - -func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) { - checker := &MyChecker{result: true} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2, 3\\)\n" + - "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + - "\\.+ Wrong number of parameters for MyChecker: " + - "want 3, got 4\n\n" - testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2, 3) - }) -} - -func (s *HelpersS) TestCheckWithError(c *check.C) { - checker := &MyChecker{result: false, error: "Some not so cool data provided!"} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Some not so cool data provided!\n\n" - testHelperFailure(c, "Check(1, checker, 2)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2) - }) -} - -func (s *HelpersS) TestCheckWithNilChecker(c *check.C) { - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, nil\\)\n" + - "\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + - "\\.+ Oops\\.\\. you've provided a nil checker!\n\n" - testHelperFailure(c, "Check(obtained, nil)", false, false, log, - func() interface{} { - return c.Check(1, nil) - }) -} - -func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) { - checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2\\)\n" + - "\\.+ newobtained int = 3\n" + - "\\.+ newexpected int = 4\n\n" - testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log, - func() interface{} { - return c.Check(1, checker, 2) - }) -} - -// ----------------------------------------------------------------------- -// Tests for Assert(), mostly the same as for Check() above. 
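Before the Assert() cases: the practical difference from Check() in consumer code is that Check records a failure and keeps the test running, while Assert records it and stops the test immediately; a CommentInterface value such as one built with check.Commentf (assumed here from the check API) attaches context to either. A hedged sketch:

func (s *FilesSuite) TestCheckVersusAssert(c *check.C) {
	c.Check(1+1, check.Equals, 2, check.Commentf("arithmetic sanity")) // a failure here would not stop the test
	c.Assert(s.workdir, check.Not(check.Equals), "")                   // a failure here stops the test
	// statements below run only when the Assert above passed
}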
- -func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) { - checker := &MyChecker{result: true} - testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} { - c.Assert(1, checker, 2) - return nil - }) - if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) { - checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} { - c.Assert(1, checker) - return nil - }) - if !reflect.DeepEqual(checker.params, []interface{}{1}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestAssertFailWithExpected(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n\n" - testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, - func() interface{} { - c.Assert(1, checker, 2) - return nil - }) -} - -func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log, - func() interface{} { - c.Assert(1, checker, 2, myComment("Hello world!")) - return nil - }) -} - -func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker\\)\n" + - "\\.+ myvalue int = 1\n\n" - testHelperFailure(c, "Assert(1, checker)", nil, true, log, - func() interface{} { - c.Assert(1, checker) - return nil - }) -} - -func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myvalue int = 1\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log, - func() interface{} { - c.Assert(1, checker, myComment("Hello world!")) - return nil - }) -} - -func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) { - checker := &MyChecker{result: true} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker\\)\n" + - "\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" + - "\\.+ Wrong number of parameters for MyChecker: " + - "want 3, got 2\n\n" - testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log, - func() interface{} { - c.Assert(1, checker) - return nil - }) -} - -func (s *HelpersS) TestAssertWithError(c *check.C) { - checker := &MyChecker{result: false, error: "Some not so cool data provided!"} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Some not so cool data provided!\n\n" - testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, - func() interface{} { 
- c.Assert(1, checker, 2) - return nil - }) -} - -func (s *HelpersS) TestAssertWithNilChecker(c *check.C) { - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, nil\\)\n" + - "\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + - "\\.+ Oops\\.\\. you've provided a nil checker!\n\n" - testHelperFailure(c, "Assert(obtained, nil)", nil, true, log, - func() interface{} { - c.Assert(1, nil) - return nil - }) -} - -// ----------------------------------------------------------------------- -// Ensure that values logged work properly in some interesting cases. - -func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + - " return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" + - "\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" + - "\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n" - testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log, - func() interface{} { - return c.Check([]byte{1, 2}, checker, []byte{1, 3}) - }) -} - -func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + - " return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" + - "\\.+ myobtained string = \"\" \\+\n" + - "\\.+ \"a\\\\n\" \\+\n" + - "\\.+ \"b\\\\n\"\n" + - "\\.+ myexpected string = \"\" \\+\n" + - "\\.+ \"a\\\\n\" \\+\n" + - "\\.+ \"b\\\\n\" \\+\n" + - "\\.+ \"c\"\n\n" - testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log, - func() interface{} { - return c.Check("a\nb\n", checker, "a\nb\nc") - }) -} - -func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) { - // If the newline is at the end of the string, don't log as multi-line. - checker := &MyChecker{result: false} - log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + - " return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" + - "\\.+ myobtained string = \"a b\\\\n\"\n" + - "\\.+ myexpected string = \"\" \\+\n" + - "\\.+ \"a\\\\n\" \\+\n" + - "\\.+ \"b\"\n\n" - testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log, - func() interface{} { - return c.Check("a b\n", checker, "a\nb") - }) -} - -// ----------------------------------------------------------------------- -// MakeDir() tests. 
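Before the MkDir() tests: c.MkDir() returns a fresh temporary directory on every call, and the runner removes it once the suite finishes, which is exactly what the helper below verifies. A hedged consumer sketch (assumes io/ioutil and path/filepath imports, in keeping with the Go of this era):

func (s *FilesSuite) TestWritesFile(c *check.C) {
	dir := c.MkDir() // unique per call; deleted automatically after the suite
	path := filepath.Join(dir, "out.txt")
	c.Assert(ioutil.WriteFile(path, []byte("data"), 0644), check.IsNil)
}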
- -type MkDirHelper struct { - path1 string - path2 string - isDir1 bool - isDir2 bool - isDir3 bool - isDir4 bool -} - -func (s *MkDirHelper) SetUpSuite(c *check.C) { - s.path1 = c.MkDir() - s.isDir1 = isDir(s.path1) -} - -func (s *MkDirHelper) Test(c *check.C) { - s.path2 = c.MkDir() - s.isDir2 = isDir(s.path2) -} - -func (s *MkDirHelper) TearDownSuite(c *check.C) { - s.isDir3 = isDir(s.path1) - s.isDir4 = isDir(s.path2) -} - -func (s *HelpersS) TestMkDir(c *check.C) { - helper := MkDirHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output}) - c.Assert(output.value, check.Equals, "") - c.Check(helper.isDir1, check.Equals, true) - c.Check(helper.isDir2, check.Equals, true) - c.Check(helper.isDir3, check.Equals, true) - c.Check(helper.isDir4, check.Equals, true) - c.Check(helper.path1, check.Not(check.Equals), - helper.path2) - c.Check(isDir(helper.path1), check.Equals, false) - c.Check(isDir(helper.path2), check.Equals, false) -} - -func isDir(path string) bool { - if stat, err := os.Stat(path); err == nil { - return stat.IsDir() - } - return false -} - -// Concurrent logging should not corrupt the underlying buffer. -// Use go test -race to detect the race in this test. -func (s *HelpersS) TestConcurrentLogging(c *check.C) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) - var start, stop sync.WaitGroup - start.Add(1) - for i, n := 0, runtime.NumCPU()*2; i < n; i++ { - stop.Add(1) - go func(i int) { - start.Wait() - for j := 0; j < 30; j++ { - c.Logf("Worker %d: line %d", i, j) - } - stop.Done() - }(i) - } - start.Done() - stop.Wait() -} - -// ----------------------------------------------------------------------- -// Test the TestName function - -type TestNameHelper struct { - name1 string - name2 string - name3 string - name4 string - name5 string -} - -func (s *TestNameHelper) SetUpSuite(c *check.C) { s.name1 = c.TestName() } -func (s *TestNameHelper) SetUpTest(c *check.C) { s.name2 = c.TestName() } -func (s *TestNameHelper) Test(c *check.C) { s.name3 = c.TestName() } -func (s *TestNameHelper) TearDownTest(c *check.C) { s.name4 = c.TestName() } -func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() } - -func (s *HelpersS) TestTestName(c *check.C) { - helper := TestNameHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output}) - c.Check(helper.name1, check.Equals, "") - c.Check(helper.name2, check.Equals, "TestNameHelper.Test") - c.Check(helper.name3, check.Equals, "TestNameHelper.Test") - c.Check(helper.name4, check.Equals, "TestNameHelper.Test") - c.Check(helper.name5, check.Equals, "") -} - -// ----------------------------------------------------------------------- -// A couple of helper functions to test helper functions.
:-) - -func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) { - var result interface{} - defer (func() { - if err := recover(); err != nil { - panic(err) - } - checkState(c, result, - &expectedState{ - name: name, - result: expectedResult, - failed: false, - log: "", - }) - })() - result = closure() -} - -func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) { - var result interface{} - defer (func() { - if err := recover(); err != nil { - panic(err) - } - checkState(c, result, - &expectedState{ - name: name, - result: expectedResult, - failed: true, - log: log, - }) - })() - result = closure() - if shouldStop { - c.Logf("%s didn't stop when it should", name) - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer.go deleted file mode 100644 index e0f7557b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer.go +++ /dev/null @@ -1,168 +0,0 @@ -package check - -import ( - "bytes" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "os" -) - -func indent(s, with string) (r string) { - eol := true - for i := 0; i != len(s); i++ { - c := s[i] - switch { - case eol && c == '\n' || c == '\r': - case c == '\n' || c == '\r': - eol = true - case eol: - eol = false - s = s[:i] + with + s[i:] - i += len(with) - } - } - return s -} - -func printLine(filename string, line int) (string, error) { - fset := token.NewFileSet() - file, err := os.Open(filename) - if err != nil { - return "", err - } - fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) - if err != nil { - return "", err - } - config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} - lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} - ast.Walk(lp, fnode) - result := lp.output.Bytes() - // Comments leave \n at the end. - n := len(result) - for n > 0 && result[n-1] == '\n' { - n-- - } - return string(result[:n]), nil -} - -type linePrinter struct { - config *printer.Config - fset *token.FileSet - fnode *ast.File - line int - output bytes.Buffer - stmt ast.Stmt -} - -func (lp *linePrinter) emit() bool { - if lp.stmt != nil { - lp.trim(lp.stmt) - lp.printWithComments(lp.stmt) - lp.stmt = nil - return true - } - return false -} - -func (lp *linePrinter) printWithComments(n ast.Node) { - nfirst := lp.fset.Position(n.Pos()).Line - nlast := lp.fset.Position(n.End()).Line - for _, g := range lp.fnode.Comments { - cfirst := lp.fset.Position(g.Pos()).Line - clast := lp.fset.Position(g.End()).Line - if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { - for _, c := range g.List { - lp.output.WriteString(c.Text) - lp.output.WriteByte('\n') - } - } - if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { - // The printer will not include the comment if it starts past - // the node itself. Trick it into printing by overlapping the - // slash with the end of the statement. 
- g.List[0].Slash = n.End() - 1 - } - } - node := &printer.CommentedNode{n, lp.fnode.Comments} - lp.config.Fprint(&lp.output, lp.fset, node) -} - -func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { - if n == nil { - if lp.output.Len() == 0 { - lp.emit() - } - return nil - } - first := lp.fset.Position(n.Pos()).Line - last := lp.fset.Position(n.End()).Line - if first <= lp.line && last >= lp.line { - // Print the innermost statement containing the line. - if stmt, ok := n.(ast.Stmt); ok { - if _, ok := n.(*ast.BlockStmt); !ok { - lp.stmt = stmt - } - } - if first == lp.line && lp.emit() { - return nil - } - return lp - } - return nil -} - -func (lp *linePrinter) trim(n ast.Node) bool { - stmt, ok := n.(ast.Stmt) - if !ok { - return true - } - line := lp.fset.Position(n.Pos()).Line - if line != lp.line { - return false - } - switch stmt := stmt.(type) { - case *ast.IfStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.SwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.TypeSwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.CaseClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.CommClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.BlockStmt: - stmt.List = lp.trimList(stmt.List) - } - return true -} - -func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { - if !lp.trim(stmt) { - return lp.emptyBlock(stmt) - } - stmt.Rbrace = stmt.Lbrace - return stmt -} - -func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { - for i := 0; i != len(stmts); i++ { - if !lp.trim(stmts[i]) { - stmts[i] = lp.emptyStmt(stmts[i]) - break - } - } - return stmts -} - -func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { - return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} -} - -func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { - p := n.Pos() - return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go deleted file mode 100644 index 538b2d52..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package check_test - -import ( - . "gopkg.in/check.v1" -) - -var _ = Suite(&PrinterS{}) - -type PrinterS struct{} - -func (s *PrinterS) TestCountSuite(c *C) { - suitesRun += 1 -} - -var printTestFuncLine int - -func init() { - printTestFuncLine = getMyLine() + 3 -} - -func printTestFunc() { - println(1) // Comment1 - if 2 == 2 { // Comment2 - println(3) // Comment3 - } - switch 5 { - case 6: println(6) // Comment6 - println(7) - } - switch interface{}(9).(type) {// Comment9 - case int: println(10) - println(11) - } - select { - case <-(chan bool)(nil): println(14) - println(15) - default: println(16) - println(17) - } - println(19, - 20) - _ = func() { println(21) - println(22) - } - println(24, func() { - println(25) - }) - // Leading comment - // with multiple lines. 
- println(29) // Comment29 -} - -var printLineTests = []struct { - line int - output string -}{ - {1, "println(1) // Comment1"}, - {2, "if 2 == 2 { // Comment2\n ...\n}"}, - {3, "println(3) // Comment3"}, - {5, "switch 5 {\n...\n}"}, - {6, "case 6:\n println(6) // Comment6\n ..."}, - {7, "println(7)"}, - {9, "switch interface{}(9).(type) { // Comment9\n...\n}"}, - {10, "case int:\n println(10)\n ..."}, - {14, "case <-(chan bool)(nil):\n println(14)\n ..."}, - {15, "println(15)"}, - {16, "default:\n println(16)\n ..."}, - {17, "println(17)"}, - {19, "println(19,\n 20)"}, - {20, "println(19,\n 20)"}, - {21, "_ = func() {\n println(21)\n println(22)\n}"}, - {22, "println(22)"}, - {24, "println(24, func() {\n println(25)\n})"}, - {25, "println(25)"}, - {26, "println(24, func() {\n println(25)\n})"}, - {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"}, -} - -func (s *PrinterS) TestPrintLine(c *C) { - for _, test := range printLineTests { - output, err := PrintLine("printer_test.go", printTestFuncLine+test.line) - c.Assert(err, IsNil) - c.Assert(output, Equals, test.output) - } -} - -var indentTests = []struct { - in, out string -}{ - {"", ""}, - {"\n", "\n"}, - {"a", ">>>a"}, - {"a\n", ">>>a\n"}, - {"a\nb", ">>>a\n>>>b"}, - {" ", ">>> "}, -} - -func (s *PrinterS) TestIndent(c *C) { - for _, test := range indentTests { - out := Indent(test.in, ">>>") - c.Assert(out, Equals, test.out) - } - -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run.go deleted file mode 100644 index da8fd798..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run.go +++ /dev/null @@ -1,175 +0,0 @@ -package check - -import ( - "bufio" - "flag" - "fmt" - "os" - "testing" - "time" -) - -// ----------------------------------------------------------------------- -// Test suite registry. - -var allSuites []interface{} - -// Suite registers the given value as a test suite to be run. Any methods -// starting with the Test prefix in the given value will be considered as -// a test method. -func Suite(suite interface{}) interface{} { - allSuites = append(allSuites, suite) - return suite -} - -// ----------------------------------------------------------------------- -// Public running interface. 
- -var ( - oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") - oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") - oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") - oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") - oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") - oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") - oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") - - newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") - newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") - newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") - newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") - newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") - newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") - newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") - newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") -) - -// TestingT runs all test suites registered with the Suite function, -// printing results to stdout, and reporting any failures back to -// the "testing" package. -func TestingT(testingT *testing.T) { - benchTime := *newBenchTime - if benchTime == 1*time.Second { - benchTime = *oldBenchTime - } - conf := &RunConf{ - Filter: *oldFilterFlag + *newFilterFlag, - Verbose: *oldVerboseFlag || *newVerboseFlag, - Stream: *oldStreamFlag || *newStreamFlag, - Benchmark: *oldBenchFlag || *newBenchFlag, - BenchmarkTime: benchTime, - BenchmarkMem: *newBenchMem, - KeepWorkDir: *oldWorkFlag || *newWorkFlag, - } - if *oldListFlag || *newListFlag { - w := bufio.NewWriter(os.Stdout) - for _, name := range ListAll(conf) { - fmt.Fprintln(w, name) - } - w.Flush() - return - } - result := RunAll(conf) - println(result.String()) - if !result.Passed() { - testingT.Fail() - } -} - -// RunAll runs all test suites registered with the Suite function, using the -// provided run configuration. -func RunAll(runConf *RunConf) *Result { - result := Result{} - for _, suite := range allSuites { - result.Add(Run(suite, runConf)) - } - return &result -} - -// Run runs the provided test suite using the provided run configuration. -func Run(suite interface{}, runConf *RunConf) *Result { - runner := newSuiteRunner(suite, runConf) - return runner.run() -} - -// ListAll returns the names of all the test functions registered with the -// Suite function that will be run with the provided run configuration. -func ListAll(runConf *RunConf) []string { - var names []string - for _, suite := range allSuites { - names = append(names, List(suite, runConf)...) - } - return names -} - -// List returns the names of the test functions in the given -// suite that will be run with the provided run configuration. -func List(suite interface{}, runConf *RunConf) []string { - var names []string - runner := newSuiteRunner(suite, runConf) - for _, t := range runner.tests { - names = append(names, t.String()) - } - return names -} - -// ----------------------------------------------------------------------- -// Result methods. 
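Before the `Result` plumbing that follows, it may help to see how these public entry points are normally wired into `go test`. A minimal sketch using only the functions above (the package and suite names here are invented for illustration):

```Go
package mypkg_test

import (
	"testing"

	. "gopkg.in/check.v1"
)

// Hook all registered suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

// Register the suite; any of its methods named Test* become test cases.
var _ = Suite(&MySuite{})

func (s *MySuite) TestAddition(c *C) {
	c.Assert(1+1, Equals, 2)
}
```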
- -func (r *Result) Add(other *Result) { - r.Succeeded += other.Succeeded - r.Skipped += other.Skipped - r.Failed += other.Failed - r.Panicked += other.Panicked - r.FixturePanicked += other.FixturePanicked - r.ExpectedFailures += other.ExpectedFailures - r.Missed += other.Missed - if r.WorkDir != "" && other.WorkDir != "" { - r.WorkDir += ":" + other.WorkDir - } else if other.WorkDir != "" { - r.WorkDir = other.WorkDir - } -} - -func (r *Result) Passed() bool { - return (r.Failed == 0 && r.Panicked == 0 && - r.FixturePanicked == 0 && r.Missed == 0 && - r.RunError == nil) -} - -func (r *Result) String() string { - if r.RunError != nil { - return "ERROR: " + r.RunError.Error() - } - - var value string - if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && - r.Missed == 0 { - value = "OK: " - } else { - value = "OOPS: " - } - value += fmt.Sprintf("%d passed", r.Succeeded) - if r.Skipped != 0 { - value += fmt.Sprintf(", %d skipped", r.Skipped) - } - if r.ExpectedFailures != 0 { - value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) - } - if r.Failed != 0 { - value += fmt.Sprintf(", %d FAILED", r.Failed) - } - if r.Panicked != 0 { - value += fmt.Sprintf(", %d PANICKED", r.Panicked) - } - if r.FixturePanicked != 0 { - value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) - } - if r.Missed != 0 { - value += fmt.Sprintf(", %d MISSED", r.Missed) - } - if r.WorkDir != "" { - value += "\nWORK=" + r.WorkDir - } - return value -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go deleted file mode 100644 index f41fffc3..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go +++ /dev/null @@ -1,419 +0,0 @@ -// These tests verify the test running logic. - -package check_test - -import ( - "errors" - . "gopkg.in/check.v1" - "os" - "sync" -) - -var runnerS = Suite(&RunS{}) - -type RunS struct{} - -func (s *RunS) TestCountSuite(c *C) { - suitesRun += 1 -} - -// ----------------------------------------------------------------------- -// Tests ensuring result counting works properly. 
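The tests below exercise exactly this bookkeeping. As a freestanding sketch of the `Result` methods shown above (the counts are chosen arbitrarily):

```Go
package main

import (
	"fmt"

	check "gopkg.in/check.v1"
)

func main() {
	r := check.Result{Succeeded: 2}
	r.Add(&check.Result{Failed: 1, Skipped: 1})
	fmt.Println(r.Passed()) // false: a single failure fails the whole run
	fmt.Println(r.String()) // OOPS: 2 passed, 1 skipped, 1 FAILED
}
```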
- -func (s *RunS) TestSuccess(c *C) { - output := String{} - result := Run(&SuccessHelper{}, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 1) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestFailure(c *C) { - output := String{} - result := Run(&FailHelper{}, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 0) - c.Check(result.Failed, Equals, 1) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestFixture(c *C) { - output := String{} - result := Run(&FixtureHelper{}, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 2) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestPanicOnTest(c *C) { - output := String{} - helper := &FixtureHelper{panicOn: "Test1"} - result := Run(helper, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 1) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 1) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestPanicOnSetUpTest(c *C) { - output := String{} - helper := &FixtureHelper{panicOn: "SetUpTest"} - result := Run(helper, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 0) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 1) - c.Check(result.Missed, Equals, 2) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestPanicOnSetUpSuite(c *C) { - output := String{} - helper := &FixtureHelper{panicOn: "SetUpSuite"} - result := Run(helper, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 0) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 1) - c.Check(result.Missed, Equals, 2) - c.Check(result.RunError, IsNil) -} - -// ----------------------------------------------------------------------- -// Check result aggregation. - -func (s *RunS) TestAdd(c *C) { - result := &Result{ - Succeeded: 1, - Skipped: 2, - Failed: 3, - Panicked: 4, - FixturePanicked: 5, - Missed: 6, - ExpectedFailures: 7, - } - result.Add(&Result{ - Succeeded: 10, - Skipped: 20, - Failed: 30, - Panicked: 40, - FixturePanicked: 50, - Missed: 60, - ExpectedFailures: 70, - }) - c.Check(result.Succeeded, Equals, 11) - c.Check(result.Skipped, Equals, 22) - c.Check(result.Failed, Equals, 33) - c.Check(result.Panicked, Equals, 44) - c.Check(result.FixturePanicked, Equals, 55) - c.Check(result.Missed, Equals, 66) - c.Check(result.ExpectedFailures, Equals, 77) - c.Check(result.RunError, IsNil) -} - -// ----------------------------------------------------------------------- -// Check the Passed() method. 
- -func (s *RunS) TestPassed(c *C) { - c.Assert((&Result{}).Passed(), Equals, true) - c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true) - c.Assert((&Result{Skipped: 1}).Passed(), Equals, true) - c.Assert((&Result{Failed: 1}).Passed(), Equals, false) - c.Assert((&Result{Panicked: 1}).Passed(), Equals, false) - c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false) - c.Assert((&Result{Missed: 1}).Passed(), Equals, false) - c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false) -} - -// ----------------------------------------------------------------------- -// Check that result printing is working correctly. - -func (s *RunS) TestPrintSuccess(c *C) { - result := &Result{Succeeded: 5} - c.Check(result.String(), Equals, "OK: 5 passed") -} - -func (s *RunS) TestPrintFailure(c *C) { - result := &Result{Failed: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED") -} - -func (s *RunS) TestPrintSkipped(c *C) { - result := &Result{Skipped: 5} - c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped") -} - -func (s *RunS) TestPrintExpectedFailures(c *C) { - result := &Result{ExpectedFailures: 5} - c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures") -} - -func (s *RunS) TestPrintPanicked(c *C) { - result := &Result{Panicked: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED") -} - -func (s *RunS) TestPrintFixturePanicked(c *C) { - result := &Result{FixturePanicked: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED") -} - -func (s *RunS) TestPrintMissed(c *C) { - result := &Result{Missed: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED") -} - -func (s *RunS) TestPrintAll(c *C) { - result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3, - Panicked: 4, FixturePanicked: 5, Missed: 6} - c.Check(result.String(), Equals, - "OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+ - "5 FIXTURE-PANICKED, 6 MISSED") -} - -func (s *RunS) TestPrintRunError(c *C) { - result := &Result{Succeeded: 1, Failed: 1, - RunError: errors.New("Kaboom!")} - c.Check(result.String(), Equals, "ERROR: Kaboom!") -} - -// ----------------------------------------------------------------------- -// Verify that the method pattern flag works correctly. 
- -func (s *RunS) TestFilterTestName(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "Test[91]"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 5) -} - -func (s *RunS) TestFilterTestNameWithAll(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: ".*"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) -} - -func (s *RunS) TestFilterSuiteName(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "FixtureHelper"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) -} - -func (s *RunS) TestFilterSuiteNameAndTestName(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test2") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 5) -} - -func (s *RunS) TestFilterAllOut(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "NotFound"} - Run(&helper, &runConf) - c.Check(len(helper.calls), Equals, 0) -} - -func (s *RunS) TestRequirePartialMatch(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "est"} - Run(&helper, &runConf) - c.Check(len(helper.calls), Equals, 8) -} - -func (s *RunS) TestFilterError(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "]["} - result := Run(&helper, &runConf) - c.Check(result.String(), Equals, - "ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`") - c.Check(len(helper.calls), Equals, 0) -} - -// ----------------------------------------------------------------------- -// Verify that List works correctly. 
- -func (s *RunS) TestListFiltered(c *C) { - names := List(&FixtureHelper{}, &RunConf{Filter: "1"}) - c.Assert(names, DeepEquals, []string{ - "FixtureHelper.Test1", - }) -} - -func (s *RunS) TestList(c *C) { - names := List(&FixtureHelper{}, &RunConf{}) - c.Assert(names, DeepEquals, []string{ - "FixtureHelper.Test1", - "FixtureHelper.Test2", - }) -} - -// ----------------------------------------------------------------------- -// Verify that verbose mode prints tests which pass as well. - -func (s *RunS) TestVerboseMode(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Verbose: true} - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" + - "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" - - c.Assert(output.value, Matches, expected) -} - -func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) { - helper := FixtureHelper{panicOn: "Test1"} - output := String{} - runConf := RunConf{Output: &output, Verbose: true} - Run(&helper, &runConf) - - expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line. - "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" - - c.Assert(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Verify the stream output mode. In this mode there's no output caching. - -type StreamHelper struct { - l2 sync.Mutex - l3 sync.Mutex -} - -func (s *StreamHelper) SetUpSuite(c *C) { - c.Log("0") -} - -func (s *StreamHelper) Test1(c *C) { - c.Log("1") - s.l2.Lock() - s.l3.Lock() - go func() { - s.l2.Lock() // Wait for "2". - c.Log("3") - s.l3.Unlock() - }() -} - -func (s *StreamHelper) Test2(c *C) { - c.Log("2") - s.l2.Unlock() - s.l3.Lock() // Wait for "3". - c.Fail() - c.Log("4") -} - -func (s *RunS) TestStreamMode(c *C) { - helper := &StreamHelper{} - output := String{} - runConf := RunConf{Output: &output, Stream: true} - Run(helper, &runConf) - - expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" + - "PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" + - "START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" + - "PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" + - "START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" + - "FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n" - - c.Assert(output.value, Matches, expected) -} - -type StreamMissHelper struct{} - -func (s *StreamMissHelper) SetUpSuite(c *C) { - c.Log("0") - c.Fail() -} - -func (s *StreamMissHelper) Test1(c *C) { - c.Log("1") -} - -func (s *RunS) TestStreamModeWithMiss(c *C) { - helper := &StreamMissHelper{} - output := String{} - runConf := RunConf{Output: &output, Stream: true} - Run(helper, &runConf) - - expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" + - "FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" + - "START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" + - "MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n" - - c.Assert(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Verify that that the keep work dir request indeed does so. 
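Both modes are driven by `RunConf` rather than by test code. A sketch of enabling them programmatically, mirroring the `-check.v` and `-check.vv` flags defined earlier:

```Go
conf := &check.RunConf{
	Output:  os.Stdout,
	Verbose: true, // also log passing tests, like -check.v
	Stream:  true, // write output as it happens, no caching, like -check.vv
}
check.RunAll(conf)
```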
- -type WorkDirSuite struct {} - -func (s *WorkDirSuite) Test(c *C) { - c.MkDir() -} - -func (s *RunS) TestKeepWorkDir(c *C) { - output := String{} - runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true} - result := Run(&WorkDirSuite{}, &runConf) - - c.Assert(result.String(), Matches, ".*\nWORK=" + result.WorkDir) - - stat, err := os.Stat(result.WorkDir) - c.Assert(err, IsNil) - c.Assert(stat.IsDir(), Equals, true) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index a68e67f0..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,188 +0,0 @@ - -Copyright (c) 2011-2014 - Canonical Inc. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. 
- - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. 
- - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md deleted file mode 100644 index d6c919e6..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. 
- - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -type T struct { - A string - B struct{C int; D []int ",flow"} -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index 95ec014e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,742 +0,0 @@ -package yaml - -import ( - "io" - "os" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. 
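One detail worth pausing on: `yaml_insert_token` compacts its slice-backed queue before appending, so the space in front of `tokens_head` is reused instead of forcing a reallocation. The same idea on a plain `int` queue, as a standalone illustration (not the library's code; the `push` helper and its names are invented here):

```Go
// push appends v to a queue whose consumed prefix ends at head.
// When the slice is full and a consumed prefix exists, shift the live
// elements to the front so append can reuse the existing buffer.
func push(queue []int, head int, v int) ([]int, int) {
	if head > 0 && len(queue) == cap(queue) {
		n := copy(queue, queue[head:])
		queue = queue[:n]
		head = 0
	}
	return append(queue, v), head
}
```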
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_file_read_handler - parser.input_file = file -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - return true -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// File write handler. -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_file.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_file_write_handler - emitter.output_file = file -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. 
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } - return true -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } - return true -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } - return true -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } - return true -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. 
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } - return true -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index ec9d2710..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,667 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
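To make the node tree concrete before reading the parser itself: a hypothetical package-internal helper (the `demoTree` name and sample document are invented; `newParser` and `node` are the unexported types defined below):

```Go
// demoTree parses "a: [1, 2]" and inspects the resulting tree.
func demoTree() {
	p := newParser([]byte("a: [1, 2]\n"))
	defer p.destroy()

	doc := p.parse()     // documentNode wrapping the top-level mapping
	m := doc.children[0] // mappingNode; children alternate key, value
	println(m.children[0].value)         // "a" (a scalarNode)
	println(len(m.children[1].children)) // 2 (items of the sequenceNode)
}
```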
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - - if len(b) == 0 { - b = []byte{'\n'} - } - - yaml_parser_set_input_string(&p.parser, b) - - p.skip() - if p.event.typ != yaml_STREAM_START_EVENT { - panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return &p -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -func (p *parser) skip() { - if p.event.typ != yaml_NO_EVENT { - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - yaml_event_delete(&p.event) - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - switch p.event.typ { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - default: - panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) - } - panic("unreachable") -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.skip() - n.children = append(n.children, p.parse()) - if p.event.typ != yaml_DOCUMENT_END_EVENT { - panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - p.skip() - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.skip() - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.skip() - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.skip() - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
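Besides reflection-driven decoding, the decoder below honors user hooks through `callUnmarshaler`. For reference, a type opts in by implementing the package's public `Unmarshaler` interface; a sketch (the `Celsius` type and its "21.5C" scalar format are invented for illustration):

```Go
package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

// Celsius decodes from scalars like "21.5C". UnmarshalYAML receives a
// re-entrant unmarshal func: the closure built in callUnmarshaler.
type Celsius float64

func (c *Celsius) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	var v float64
	if _, err := fmt.Sscanf(s, "%fC", &v); err != nil {
		return err
	}
	*c = Celsius(v)
	return nil
}

func main() {
	var t struct{ Temp Celsius }
	if err := yaml.Unmarshal([]byte("temp: 21.5C"), &t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Temp) // 21.5
}
```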
- -type decoder struct { - doc *node - aliases map[string]bool - mapType reflect.Type - terrors []string -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() -) - -func newDecoder() *decoder { - d := &decoder{mapType: defaultMapType} - d.aliases = make(map[string]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - failf("unknown anchor '%s' referenced", n.value) - } - if d.aliases[n.value] { - failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, 
zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if s, ok := resolved.(string); ok && out.CanAddr() { - if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { - err := u.UnmarshalText([]byte(s)) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - good = true - } else if resolved != nil { - out.SetString(n.value) - good = true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else { - out.Set(reflect.ValueOf(resolved)) - } - good = true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - good = true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - good = true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - good = true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - good = true - case int64: - out.SetFloat(float64(resolved)) - good = true - case uint64: - out.SetFloat(float64(resolved)) - good = true - case float64: - out.SetFloat(resolved) - good = true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
- elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - good = true - } - } - if !good { - d.terror(n, tag, out) - } - return good -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Interface: - // No type hints. Will have to use a generic sequence. - iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - out.Set(out.Slice(0, j)) - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = 
out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go deleted file mode 100644 index b91a060e..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go +++ /dev/null @@ -1,953 +0,0 @@ -package yaml_test - -import ( - "errors" - . "gopkg.in/check.v1" - "gopkg.in/yaml.v2" - "math" - "net" - "reflect" - "strings" - "time" -) - -var unmarshalIntTest = 123 - -var unmarshalTests = []struct { - data string - value interface{} -}{ - { - "", - &struct{}{}, - }, { - "{}", &struct{}{}, - }, { - "v: hi", - map[string]string{"v": "hi"}, - }, { - "v: hi", map[string]interface{}{"v": "hi"}, - }, { - "v: true", - map[string]string{"v": "true"}, - }, { - "v: true", - map[string]interface{}{"v": true}, - }, { - "v: 10", - map[string]interface{}{"v": 10}, - }, { - "v: 0b10", - map[string]interface{}{"v": 2}, - }, { - "v: 0xA", - map[string]interface{}{"v": 10}, - }, { - "v: 4294967296", - map[string]int64{"v": 4294967296}, - }, { - "v: 0.1", - map[string]interface{}{"v": 0.1}, - }, { - "v: .1", - map[string]interface{}{"v": 0.1}, - }, { - "v: .Inf", - map[string]interface{}{"v": math.Inf(+1)}, - }, { - "v: -.Inf", - map[string]interface{}{"v": math.Inf(-1)}, - }, { - "v: -10", - map[string]interface{}{"v": -10}, - }, { - "v: -.1", - map[string]interface{}{"v": -0.1}, - }, - - // Simple values. - { - "123", - &unmarshalIntTest, - }, - - // Floats from spec - { - "canonical: 6.8523e+5", - map[string]interface{}{"canonical": 6.8523e+5}, - }, { - "expo: 685.230_15e+03", - map[string]interface{}{"expo": 685.23015e+03}, - }, { - "fixed: 685_230.15", - map[string]interface{}{"fixed": 685230.15}, - }, { - "neginf: -.inf", - map[string]interface{}{"neginf": math.Inf(-1)}, - }, { - "fixed: 685_230.15", - map[string]float64{"fixed": 685230.15}, - }, - //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported - //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
- - // Bools from spec - { - "canonical: y", - map[string]interface{}{"canonical": true}, - }, { - "answer: NO", - map[string]interface{}{"answer": false}, - }, { - "logical: True", - map[string]interface{}{"logical": true}, - }, { - "option: on", - map[string]interface{}{"option": true}, - }, { - "option: on", - map[string]bool{"option": true}, - }, - // Ints from spec - { - "canonical: 685230", - map[string]interface{}{"canonical": 685230}, - }, { - "decimal: +685_230", - map[string]interface{}{"decimal": 685230}, - }, { - "octal: 02472256", - map[string]interface{}{"octal": 685230}, - }, { - "hexa: 0x_0A_74_AE", - map[string]interface{}{"hexa": 685230}, - }, { - "bin: 0b1010_0111_0100_1010_1110", - map[string]interface{}{"bin": 685230}, - }, { - "bin: -0b101010", - map[string]interface{}{"bin": -42}, - }, { - "decimal: +685_230", - map[string]int{"decimal": 685230}, - }, - - //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported - - // Nulls from spec - { - "empty:", - map[string]interface{}{"empty": nil}, - }, { - "canonical: ~", - map[string]interface{}{"canonical": nil}, - }, { - "english: null", - map[string]interface{}{"english": nil}, - }, { - "~: null key", - map[interface{}]string{nil: "null key"}, - }, { - "empty:", - map[string]*bool{"empty": nil}, - }, - - // Flow sequence - { - "seq: [A,B]", - map[string]interface{}{"seq": []interface{}{"A", "B"}}, - }, { - "seq: [A,B,C,]", - map[string][]string{"seq": []string{"A", "B", "C"}}, - }, { - "seq: [A,1,C]", - map[string][]string{"seq": []string{"A", "1", "C"}}, - }, { - "seq: [A,1,C]", - map[string][]int{"seq": []int{1}}, - }, { - "seq: [A,1,C]", - map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, - }, - // Block sequence - { - "seq:\n - A\n - B", - map[string]interface{}{"seq": []interface{}{"A", "B"}}, - }, { - "seq:\n - A\n - B\n - C", - map[string][]string{"seq": []string{"A", "B", "C"}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string][]string{"seq": []string{"A", "1", "C"}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string][]int{"seq": []int{1}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, - }, - - // Literal block scalar - { - "scalar: | # Comment\n\n literal\n\n \ttext\n\n", - map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, - }, - - // Folded block scalar - { - "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", - map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, - }, - - // Map inside interface with no type hints. - { - "a: {b: c}", - map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, - }, - - // Structs and type conversions. 
- { - "hello: world", - &struct{ Hello string }{"world"}, - }, { - "a: {b: c}", - &struct{ A struct{ B string } }{struct{ B string }{"c"}}, - }, { - "a: {b: c}", - &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, - }, { - "a: {b: c}", - &struct{ A map[string]string }{map[string]string{"b": "c"}}, - }, { - "a: {b: c}", - &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, - }, { - "a:", - &struct{ A map[string]string }{}, - }, { - "a: 1", - &struct{ A int }{1}, - }, { - "a: 1", - &struct{ A float64 }{1}, - }, { - "a: 1.0", - &struct{ A int }{1}, - }, { - "a: 1.0", - &struct{ A uint }{1}, - }, { - "a: [1, 2]", - &struct{ A []int }{[]int{1, 2}}, - }, { - "a: 1", - &struct{ B int }{0}, - }, { - "a: 1", - &struct { - B int "a" - }{1}, - }, { - "a: y", - &struct{ A bool }{true}, - }, - - // Some cross type conversions - { - "v: 42", - map[string]uint{"v": 42}, - }, { - "v: -42", - map[string]uint{}, - }, { - "v: 4294967296", - map[string]uint64{"v": 4294967296}, - }, { - "v: -4294967296", - map[string]uint64{}, - }, - - // int - { - "int_max: 2147483647", - map[string]int{"int_max": math.MaxInt32}, - }, - { - "int_min: -2147483648", - map[string]int{"int_min": math.MinInt32}, - }, - { - "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 - map[string]int{}, - }, - - // int64 - { - "int64_max: 9223372036854775807", - map[string]int64{"int64_max": math.MaxInt64}, - }, - { - "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", - map[string]int64{"int64_max_base2": math.MaxInt64}, - }, - { - "int64_min: -9223372036854775808", - map[string]int64{"int64_min": math.MinInt64}, - }, - { - "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", - map[string]int64{"int64_neg_base2": -math.MaxInt64}, - }, - { - "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 - map[string]int64{}, - }, - - // uint - { - "uint_min: 0", - map[string]uint{"uint_min": 0}, - }, - { - "uint_max: 4294967295", - map[string]uint{"uint_max": math.MaxUint32}, - }, - { - "uint_underflow: -1", - map[string]uint{}, - }, - - // uint64 - { - "uint64_min: 0", - map[string]uint{"uint64_min": 0}, - }, - { - "uint64_max: 18446744073709551615", - map[string]uint64{"uint64_max": math.MaxUint64}, - }, - { - "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", - map[string]uint64{"uint64_max_base2": math.MaxUint64}, - }, - { - "uint64_maxint64: 9223372036854775807", - map[string]uint64{"uint64_maxint64": math.MaxInt64}, - }, - { - "uint64_underflow: -1", - map[string]uint64{}, - }, - - // float32 - { - "float32_max: 3.40282346638528859811704183484516925440e+38", - map[string]float32{"float32_max": math.MaxFloat32}, - }, - { - "float32_nonzero: 1.401298464324817070923729583289916131280e-45", - map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, - }, - { - "float32_maxuint64: 18446744073709551615", - map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, - }, - { - "float32_maxuint64+1: 18446744073709551616", - map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, - }, - - // float64 - { - "float64_max: 1.797693134862315708145274237317043567981e+308", - map[string]float64{"float64_max": math.MaxFloat64}, - }, - { - "float64_nonzero: 4.940656458412465441765687928682213723651e-324", - map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, - }, - { - "float64_maxuint64: 18446744073709551615", - map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
- }, - { - "float64_maxuint64+1: 18446744073709551616", - map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, - }, - - // Overflow cases. - { - "v: 4294967297", - map[string]int32{}, - }, { - "v: 128", - map[string]int8{}, - }, - - // Quoted values. - { - "'1': '\"2\"'", - map[interface{}]interface{}{"1": "\"2\""}, - }, { - "v:\n- A\n- 'B\n\n C'\n", - map[string][]string{"v": []string{"A", "B\nC"}}, - }, - - // Explicit tags. - { - "v: !!float '1.1'", - map[string]interface{}{"v": 1.1}, - }, { - "v: !!null ''", - map[string]interface{}{"v": nil}, - }, { - "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", - map[string]interface{}{"v": 1}, - }, - - // Anchors and aliases. - { - "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", - &struct{ A, B, C, D int }{1, 2, 1, 2}, - }, { - "a: &a {c: 1}\nb: *a", - &struct { - A, B struct { - C int - } - }{struct{ C int }{1}, struct{ C int }{1}}, - }, { - "a: &a [1, 2]\nb: *a", - &struct{ B []int }{[]int{1, 2}}, - }, { - "b: *a\na: &a {c: 1}", - &struct { - A, B struct { - C int - } - }{struct{ C int }{1}, struct{ C int }{1}}, - }, - - // Bug #1133337 - { - "foo: ''", - map[string]*string{"foo": new(string)}, - }, { - "foo: null", - map[string]string{"foo": ""}, - }, { - "foo: null", - map[string]interface{}{"foo": nil}, - }, - - // Ignored field - { - "a: 1\nb: 2\n", - &struct { - A int - B int "-" - }{1, 0}, - }, - - // Bug #1191981 - { - "" + - "%YAML 1.1\n" + - "--- !!str\n" + - `"Generic line break (no glyph)\n\` + "\n" + - ` Generic line break (glyphed)\n\` + "\n" + - ` Line separator\u2028\` + "\n" + - ` Paragraph separator\u2029"` + "\n", - "" + - "Generic line break (no glyph)\n" + - "Generic line break (glyphed)\n" + - "Line separator\u2028Paragraph separator\u2029", - }, - - // Struct inlining - { - "a: 1\nb: 2\nc: 3\n", - &struct { - A int - C inlineB `yaml:",inline"` - }{1, inlineB{2, inlineC{3}}}, - }, - - // bug 1243827 - { - "a: -b_c", - map[string]interface{}{"a": "-b_c"}, - }, - { - "a: +b_c", - map[string]interface{}{"a": "+b_c"}, - }, - { - "a: 50cent_of_dollar", - map[string]interface{}{"a": "50cent_of_dollar"}, - }, - - // Duration - { - "a: 3s", - map[string]time.Duration{"a": 3 * time.Second}, - }, - - // Issue #24. - { - "a: ", - map[string]string{"a": ""}, - }, - - // Base 60 floats are obsolete and unsupported. - { - "a: 1:1\n", - map[string]string{"a": "1:1"}, - }, - - // Binary data. - { - "a: !!binary gIGC\n", - map[string]string{"a": "\x80\x81\x82"}, - }, { - "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", - map[string]string{"a": strings.Repeat("\x90", 54)}, - }, { - "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", - map[string]string{"a": strings.Repeat("\x00", 52)}, - }, - - // Ordered maps. - { - "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", - &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, - }, - - // Issue #39. - { - "a:\n b:\n c: d\n", - map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, - }, - - // Custom map type. - { - "a: {b: c}", - M{"a": M{"b": "c"}}, - }, - - // Support encoding.TextUnmarshaler. - { - "a: 1.2.3.4\n", - map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, - }, - - // Encode empty lists as zero-length slices. 
- { - "a: []", - &struct{ A []int }{[]int{}}, - }, -} - -type M map[interface{}]interface{} - -type inlineB struct { - B int - inlineC `yaml:",inline"` -} - -type inlineC struct { - C int -} - -func (s *S) TestUnmarshal(c *C) { - for _, item := range unmarshalTests { - t := reflect.ValueOf(item.value).Type() - var value interface{} - switch t.Kind() { - case reflect.Map: - value = reflect.MakeMap(t).Interface() - case reflect.String: - value = reflect.New(t).Interface() - case reflect.Ptr: - value = reflect.New(t.Elem()).Interface() - default: - c.Fatalf("missing case for %s", t) - } - err := yaml.Unmarshal([]byte(item.data), value) - if _, ok := err.(*yaml.TypeError); !ok { - c.Assert(err, IsNil) - } - if t.Kind() == reflect.String { - c.Assert(*value.(*string), Equals, item.value) - } else { - c.Assert(value, DeepEquals, item.value) - } - } -} - -func (s *S) TestUnmarshalNaN(c *C) { - value := map[string]interface{}{} - err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) - c.Assert(err, IsNil) - c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) -} - -var unmarshalErrorTests = []struct { - data, error string -}{ - {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, - {"v: [A,", "yaml: line 1: did not find expected node content"}, - {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, - {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, - {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, - {"value: -", "yaml: block sequence entries are not allowed in this context"}, - {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, - {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, - {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, -} - -func (s *S) TestUnmarshalErrors(c *C) { - for _, item := range unmarshalErrorTests { - var value interface{} - err := yaml.Unmarshal([]byte(item.data), &value) - c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) - } -} - -var unmarshalerTests = []struct { - data, tag string - value interface{} -}{ - {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, - {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, - {"_: 10", "!!int", 10}, - {"_: null", "!!null", nil}, - {`_: BAR!`, "!!str", "BAR!"}, - {`_: "BAR!"`, "!!str", "BAR!"}, - {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, -} - -var unmarshalerResult = map[int]error{} - -type unmarshalerType struct { - value interface{} -} - -func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { - if err := unmarshal(&o.value); err != nil { - return err - } - if i, ok := o.value.(int); ok { - if result, ok := unmarshalerResult[i]; ok { - return result - } - } - return nil -} - -type unmarshalerPointer struct { - Field *unmarshalerType "_" -} - -type unmarshalerValue struct { - Field unmarshalerType "_" -} - -func (s *S) TestUnmarshalerPointerField(c *C) { - for _, item := range unmarshalerTests { - obj := &unmarshalerPointer{} - err := yaml.Unmarshal([]byte(item.data), obj) - c.Assert(err, IsNil) - if item.value == nil { - c.Assert(obj.Field, IsNil) - } else { - c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) - c.Assert(obj.Field.value, DeepEquals, item.value) - } - } -} - -func (s *S) TestUnmarshalerValueField(c *C) { - for _, item := range unmarshalerTests { - obj := &unmarshalerValue{} - err := yaml.Unmarshal([]byte(item.data), obj) - c.Assert(err, IsNil) - c.Assert(obj.Field, 
NotNil, Commentf("Pointer not initialized (%#v)", item.value)) - c.Assert(obj.Field.value, DeepEquals, item.value) - } -} - -func (s *S) TestUnmarshalerWholeDocument(c *C) { - obj := &unmarshalerType{} - err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) - c.Assert(err, IsNil) - value, ok := obj.value.(map[interface{}]interface{}) - c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) - c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) -} - -func (s *S) TestUnmarshalerTypeError(c *C) { - unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} - unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} - defer func() { - delete(unmarshalerResult, 2) - delete(unmarshalerResult, 4) - }() - - type T struct { - Before int - After int - M map[string]*unmarshalerType - } - var v T - data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` - err := yaml.Unmarshal([]byte(data), &v) - c.Assert(err, ErrorMatches, ""+ - "yaml: unmarshal errors:\n"+ - " line 1: cannot unmarshal !!str `A` into int\n"+ - " foo\n"+ - " bar\n"+ - " line 1: cannot unmarshal !!str `B` into int") - c.Assert(v.M["abc"], NotNil) - c.Assert(v.M["def"], IsNil) - c.Assert(v.M["ghi"], NotNil) - c.Assert(v.M["jkl"], IsNil) - - c.Assert(v.M["abc"].value, Equals, 1) - c.Assert(v.M["ghi"].value, Equals, 3) -} - -type proxyTypeError struct{} - -func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - var a int32 - var b int64 - if err := unmarshal(&s); err != nil { - panic(err) - } - if s == "a" { - if err := unmarshal(&b); err == nil { - panic("should have failed") - } - return unmarshal(&a) - } - if err := unmarshal(&a); err == nil { - panic("should have failed") - } - return unmarshal(&b) -} - -func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { - type T struct { - Before int - After int - M map[string]*proxyTypeError - } - var v T - data := `{before: A, m: {abc: a, def: b}, after: B}` - err := yaml.Unmarshal([]byte(data), &v) - c.Assert(err, ErrorMatches, ""+ - "yaml: unmarshal errors:\n"+ - " line 1: cannot unmarshal !!str `A` into int\n"+ - " line 1: cannot unmarshal !!str `a` into int32\n"+ - " line 1: cannot unmarshal !!str `b` into int64\n"+ - " line 1: cannot unmarshal !!str `B` into int") -} - -type failingUnmarshaler struct{} - -var failingErr = errors.New("failingErr") - -func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { - return failingErr -} - -func (s *S) TestUnmarshalerError(c *C) { - err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) - c.Assert(err, Equals, failingErr) -} - -type sliceUnmarshaler []int - -func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { - var slice []int - err := unmarshal(&slice) - if err == nil { - *su = slice - return nil - } - - var intVal int - err = unmarshal(&intVal) - if err == nil { - *su = []int{intVal} - return nil - } - - return err -} - -func (s *S) TestUnmarshalerRetry(c *C) { - var su sliceUnmarshaler - err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) - c.Assert(err, IsNil) - c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) - - err = yaml.Unmarshal([]byte("1"), &su) - c.Assert(err, IsNil) - c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) -} - -// From http://yaml.org/type/merge.html -var mergeTests = ` -anchors: - list: - - &CENTER { "x": 1, "y": 2 } - - &LEFT { "x": 0, "y": 2 } - - &BIG { "r": 10 } - - &SMALL { "r": 1 } - -# All the following maps are equal: - -plain: - # Explicit keys - "x": 1 - "y": 2 - "r": 
10 - label: center/big - -mergeOne: - # Merge one map - << : *CENTER - "r": 10 - label: center/big - -mergeMultiple: - # Merge multiple maps - << : [ *CENTER, *BIG ] - label: center/big - -override: - # Override - << : [ *BIG, *LEFT, *SMALL ] - "x": 1 - label: center/big - -shortTag: - # Explicit short merge tag - !!merge "<<" : [ *CENTER, *BIG ] - label: center/big - -longTag: - # Explicit merge long tag - !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ] - label: center/big - -inlineMap: - # Inlined map - << : {"x": 1, "y": 2, "r": 10} - label: center/big - -inlineSequenceMap: - # Inlined map in sequence - << : [ *CENTER, {"r": 10} ] - label: center/big -` - -func (s *S) TestMerge(c *C) { - var want = map[interface{}]interface{}{ - "x": 1, - "y": 2, - "r": 10, - "label": "center/big", - } - - var m map[interface{}]interface{} - err := yaml.Unmarshal([]byte(mergeTests), &m) - c.Assert(err, IsNil) - for name, test := range m { - if name == "anchors" { - continue - } - c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) - } -} - -func (s *S) TestMergeStruct(c *C) { - type Data struct { - X, Y, R int - Label string - } - want := Data{1, 2, 10, "center/big"} - - var m map[string]Data - err := yaml.Unmarshal([]byte(mergeTests), &m) - c.Assert(err, IsNil) - for name, test := range m { - if name == "anchors" { - continue - } - c.Assert(test, Equals, want, Commentf("test %q failed", name)) - } -} - -var unmarshalNullTests = []func() interface{}{ - func() interface{} { var v interface{}; v = "v"; return &v }, - func() interface{} { var s = "s"; return &s }, - func() interface{} { var s = "s"; sptr := &s; return &sptr }, - func() interface{} { var i = 1; return &i }, - func() interface{} { var i = 1; iptr := &i; return &iptr }, - func() interface{} { m := map[string]int{"s": 1}; return &m }, - func() interface{} { m := map[string]int{"s": 1}; return m }, -} - -func (s *S) TestUnmarshalNull(c *C) { - for _, test := range unmarshalNullTests { - item := test() - zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() - err := yaml.Unmarshal([]byte("null"), item) - c.Assert(err, IsNil) - if reflect.TypeOf(item).Kind() == reflect.Map { - c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) - } else { - c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) - } - } -} - -func (s *S) TestUnmarshalSliceOnPreset(c *C) { - // Issue #48. - v := struct{ A []int }{[]int{1}} - yaml.Unmarshal([]byte("a: [2]"), &v) - c.Assert(v.A, DeepEquals, []int{2}) -} - -//var data []byte -//func init() { -// var err error -// data, err = ioutil.ReadFile("/tmp/file.yaml") -// if err != nil { -// panic(err) -// } -//} -// -//func (s *S) BenchmarkUnmarshal(c *C) { -// var err error -// for i := 0; i < c.N; i++ { -// var v map[string]interface{} -// err = yaml.Unmarshal(data, &v) -// } -// if err != nil { -// panic(err) -// } -//} -// -//func (s *S) BenchmarkMarshal(c *C) { -// var v map[string]interface{} -// yaml.Unmarshal(data, &v) -// c.ResetTimer() -// for i := 0; i < c.N; i++ { -// yaml.Marshal(&v) -// } -//} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index 9b3dc4a4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// Flush the buffer if needed.
-func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - return false -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag.
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceeded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceeded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid.
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index b7edc799..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -package yaml - -import ( - "encoding" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool -} - -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() - return e -} - -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) - e.emit() - e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. 
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { - e.nilv() - return - } - iface := in.Interface() - if m, ok := iface.(Marshaler); ok { - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - } else if m, ok := iface.(encoding.TextMarshaler); ok { - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - } - switch in.Kind() { - case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - f() - e.must(yaml_mapping_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} 
- -// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } else { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. - s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go deleted file mode 100644 index d453f5c5..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go +++ /dev/null @@ -1,463 +0,0 @@ -package yaml_test - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" - - . 
"gopkg.in/check.v1" - "gopkg.in/yaml.v2" - "net" -) - -var marshalIntTest = 123 - -var marshalTests = []struct { - value interface{} - data string -}{ - { - nil, - "null\n", - }, { - &struct{}{}, - "{}\n", - }, { - map[string]string{"v": "hi"}, - "v: hi\n", - }, { - map[string]interface{}{"v": "hi"}, - "v: hi\n", - }, { - map[string]string{"v": "true"}, - "v: \"true\"\n", - }, { - map[string]string{"v": "false"}, - "v: \"false\"\n", - }, { - map[string]interface{}{"v": true}, - "v: true\n", - }, { - map[string]interface{}{"v": false}, - "v: false\n", - }, { - map[string]interface{}{"v": 10}, - "v: 10\n", - }, { - map[string]interface{}{"v": -10}, - "v: -10\n", - }, { - map[string]uint{"v": 42}, - "v: 42\n", - }, { - map[string]interface{}{"v": int64(4294967296)}, - "v: 4294967296\n", - }, { - map[string]int64{"v": int64(4294967296)}, - "v: 4294967296\n", - }, { - map[string]uint64{"v": 4294967296}, - "v: 4294967296\n", - }, { - map[string]interface{}{"v": "10"}, - "v: \"10\"\n", - }, { - map[string]interface{}{"v": 0.1}, - "v: 0.1\n", - }, { - map[string]interface{}{"v": float64(0.1)}, - "v: 0.1\n", - }, { - map[string]interface{}{"v": -0.1}, - "v: -0.1\n", - }, { - map[string]interface{}{"v": math.Inf(+1)}, - "v: .inf\n", - }, { - map[string]interface{}{"v": math.Inf(-1)}, - "v: -.inf\n", - }, { - map[string]interface{}{"v": math.NaN()}, - "v: .nan\n", - }, { - map[string]interface{}{"v": nil}, - "v: null\n", - }, { - map[string]interface{}{"v": ""}, - "v: \"\"\n", - }, { - map[string][]string{"v": []string{"A", "B"}}, - "v:\n- A\n- B\n", - }, { - map[string][]string{"v": []string{"A", "B\nC"}}, - "v:\n- A\n- |-\n B\n C\n", - }, { - map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, - "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", - }, { - map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, - "a:\n b: c\n", - }, { - map[string]interface{}{"a": "-"}, - "a: '-'\n", - }, - - // Simple values. 
- { - &marshalIntTest, - "123\n", - }, - - // Structures - { - &struct{ Hello string }{"world"}, - "hello: world\n", - }, { - &struct { - A struct { - B string - } - }{struct{ B string }{"c"}}, - "a:\n b: c\n", - }, { - &struct { - A *struct { - B string - } - }{&struct{ B string }{"c"}}, - "a:\n b: c\n", - }, { - &struct { - A *struct { - B string - } - }{}, - "a: null\n", - }, { - &struct{ A int }{1}, - "a: 1\n", - }, { - &struct{ A []int }{[]int{1, 2}}, - "a:\n- 1\n- 2\n", - }, { - &struct { - B int "a" - }{1}, - "a: 1\n", - }, { - &struct{ A bool }{true}, - "a: true\n", - }, - - // Conditional flag - { - &struct { - A int "a,omitempty" - B int "b,omitempty" - }{1, 0}, - "a: 1\n", - }, { - &struct { - A int "a,omitempty" - B int "b,omitempty" - }{0, 0}, - "{}\n", - }, { - &struct { - A *struct{ X, y int } "a,omitempty,flow" - }{&struct{ X, y int }{1, 2}}, - "a: {x: 1}\n", - }, { - &struct { - A *struct{ X, y int } "a,omitempty,flow" - }{nil}, - "{}\n", - }, { - &struct { - A *struct{ X, y int } "a,omitempty,flow" - }{&struct{ X, y int }{}}, - "a: {x: 0}\n", - }, { - &struct { - A struct{ X, y int } "a,omitempty,flow" - }{struct{ X, y int }{1, 2}}, - "a: {x: 1}\n", - }, { - &struct { - A struct{ X, y int } "a,omitempty,flow" - }{struct{ X, y int }{0, 1}}, - "{}\n", - }, - - // Flow flag - { - &struct { - A []int "a,flow" - }{[]int{1, 2}}, - "a: [1, 2]\n", - }, { - &struct { - A map[string]string "a,flow" - }{map[string]string{"b": "c", "d": "e"}}, - "a: {b: c, d: e}\n", - }, { - &struct { - A struct { - B, D string - } "a,flow" - }{struct{ B, D string }{"c", "e"}}, - "a: {b: c, d: e}\n", - }, - - // Unexported field - { - &struct { - u int - A int - }{0, 1}, - "a: 1\n", - }, - - // Ignored field - { - &struct { - A int - B int "-" - }{1, 2}, - "a: 1\n", - }, - - // Struct inlining - { - &struct { - A int - C inlineB `yaml:",inline"` - }{1, inlineB{2, inlineC{3}}}, - "a: 1\nb: 2\nc: 3\n", - }, - - // Duration - { - map[string]time.Duration{"a": 3 * time.Second}, - "a: 3s\n", - }, - - // Issue #24: bug in map merging logic. - { - map[string]string{"a": ""}, - "a: \n", - }, - - // Issue #34: marshal unsupported base 60 floats quoted for compatibility - // with old YAML 1.1 parsers. - { - map[string]string{"a": "1:1"}, - "a: \"1:1\"\n", - }, - - // Binary data. - { - map[string]string{"a": "\x00"}, - "a: \"\\0\"\n", - }, { - map[string]string{"a": "\x80\x81\x82"}, - "a: !!binary gIGC\n", - }, { - map[string]string{"a": strings.Repeat("\x90", 54)}, - "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", - }, - - // Ordered maps. - { - &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, - "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", - }, - - // Encode unicode as utf-8 rather than in escaped form. - { - map[string]string{"a": "你好"}, - "a: 你好\n", - }, - - // Support encoding.TextMarshaler. - { - map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, - "a: 1.2.3.4\n", - }, - - // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
- { - map[string]string{"a": "b: c"}, - "a: 'b: c'\n", - }, -} - -func (s *S) TestMarshal(c *C) { - for _, item := range marshalTests { - data, err := yaml.Marshal(item.value) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data) - } -} - -var marshalErrorTests = []struct { - value interface{} - error string - panic string -}{{ - value: &struct { - B int - inlineB ",inline" - }{1, inlineB{2, inlineC{3}}}, - panic: `Duplicated key 'b' in struct struct \{ B int; .*`, -}} - -func (s *S) TestMarshalErrors(c *C) { - for _, item := range marshalErrorTests { - if item.panic != "" { - c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) - } else { - _, err := yaml.Marshal(item.value) - c.Assert(err, ErrorMatches, item.error) - } - } -} - -func (s *S) TestMarshalTypeCache(c *C) { - var data []byte - var err error - func() { - type T struct{ A int } - data, err = yaml.Marshal(&T{}) - c.Assert(err, IsNil) - }() - func() { - type T struct{ B int } - data, err = yaml.Marshal(&T{}) - c.Assert(err, IsNil) - }() - c.Assert(string(data), Equals, "b: 0\n") -} - -var marshalerTests = []struct { - data string - value interface{} -}{ - {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, - {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, - {"_: 10\n", 10}, - {"_: null\n", nil}, - {"_: BAR!\n", "BAR!"}, -} - -type marshalerType struct { - value interface{} -} - -func (o marshalerType) MarshalText() ([]byte, error) { - panic("MarshalText called on type with MarshalYAML") -} - -func (o marshalerType) MarshalYAML() (interface{}, error) { - return o.value, nil -} - -type marshalerValue struct { - Field marshalerType "_" -} - -func (s *S) TestMarshaler(c *C) { - for _, item := range marshalerTests { - obj := &marshalerValue{} - obj.Field.value = item.value - data, err := yaml.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, string(item.data)) - } -} - -func (s *S) TestMarshalerWholeDocument(c *C) { - obj := &marshalerType{} - obj.value = map[string]string{"hello": "world!"} - data, err := yaml.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "hello: world!\n") -} - -type failingMarshaler struct{} - -func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { - return nil, failingErr -} - -func (s *S) TestMarshalerError(c *C) { - _, err := yaml.Marshal(&failingMarshaler{}) - c.Assert(err, Equals, failingErr) -} - -func (s *S) TestSortedOutput(c *C) { - order := []interface{}{ - false, - true, - 1, - uint(1), - 1.0, - 1.1, - 1.2, - 2, - uint(2), - 2.0, - 2.1, - "", - ".1", - ".2", - ".a", - "1", - "2", - "a!10", - "a/2", - "a/10", - "a~10", - "ab/1", - "b/1", - "b/01", - "b/2", - "b/02", - "b/3", - "b/03", - "b1", - "b01", - "b3", - "c2.10", - "c10.2", - "d1", - "d12", - "d12a", - } - m := make(map[interface{}]int) - for _, k := range order { - m[k] = 1 - } - data, err := yaml.Marshal(m) - c.Assert(err, IsNil) - out := "\n" + string(data) - last := 0 - for i, k := range order { - repr := fmt.Sprint(k) - if s, ok := k.(string); ok { - if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { - repr = `"` + repr + `"` - } - } - index := strings.Index(out, "\n"+repr+":") - if index == -1 { - c.Fatalf("%#v is not in the output: %#v", k, out) - } - if index < last { - c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) - } - last = index - } -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go 
b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 0a7037ad..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1096 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } - return false -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected <document start>", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
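An empty scalar (generated below) is how the parser represents a missing key or value: instead of failing, it synthesizes a zero-length plain scalar at the recorded mark, which the resolver later turns into null. A small sketch, assuming the package's public `Unmarshal` entry point:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// "b:" in block style and "c" in flow style both omit a value; the
	// parser emits an empty scalar for each, which resolves to nil.
	var m map[string]interface{}
	if err := yaml.Unmarshal([]byte("a: 1\nb:"), &m); err != nil {
		panic(err)
	}
	fmt.Println(m["a"], m["b"] == nil) // 1 true

	var f map[string]interface{}
	if err := yaml.Unmarshal([]byte("{c, d: 2}"), &f); err != nil {
		panic(err)
	}
	fmt.Println(f["c"] == nil, f["d"]) // true 2
}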
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index d5fb0972..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,391 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. 
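The reader layer that starts here runs before the scanner sees a single character: it transcodes the raw byte stream to UTF-8 and reports malformed input through the error fields set below. A sketch of the failure mode, assuming the package's public API and asserting only that an error occurs, since the message text comes from the checks further down:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// 0xC3 announces a two-byte UTF-8 sequence, but '(' (0x28) is not a
	// valid continuation byte, so the reader rejects the stream instead
	// of handing garbage to the scanner.
	var v interface{}
	err := yaml.Unmarshal([]byte("a: \xc3\x28"), &v)
	fmt.Println(err != nil) // true
}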
-func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. 
- buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. 
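	// The range rejected below covers the UTF-16 surrogate halves
	// (U+D800..U+DFFF), which are not code points in their own right,
	// and anything above U+10FFFF, which lies outside Unicode entirely.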
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - } - buffer_len += width - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 93a86327..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,203 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "strconv" - "strings" - "unicode/utf8" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" 
+ tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: - return true - } - return false -} - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) - } else { - return yaml_INT_TAG, -intv - } - } - } - // XXX Handle timestamps here. - - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
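The resolver above falls back to `!!binary` for strings that are not valid UTF-8, and `encodeBase64` below produces the line-wrapped payload for them on output. A round-trip sketch, assuming the public `Marshal`/`Unmarshal` API (the exact output formatting is the sketch's expectation, not a guarantee):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// "\xff\x00" is not valid UTF-8, so it cannot be written as a plain
	// string scalar; the encoder tags it !!binary and base64-encodes it.
	out, err := yaml.Marshal(map[string]string{"data": "\xff\x00"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // expected shape: data: !!binary /wA=

	var back map[string]string
	if err := yaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back["data"] == "\xff\x00") // true
}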
-func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index fe93b190..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2710 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! 
tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. 
A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. 
Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. 
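`read_line` (below) and `skip_line` accept CR, CRLF, NEL (U+0085), and the LS/PS separators as line breaks, folding the first three to a single LF, so a document means the same thing regardless of the platform's newline convention. A sketch, assuming the public API:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// The same document with DOS and Unix line endings parses identically.
	var dos, unix map[string]int
	if err := yaml.Unmarshal([]byte("a: 1\r\nb: 2\r\n"), &dos); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte("a: 1\nb: 2\n"), &unix); err != nil {
		panic(err)
	}
	fmt.Println(dos["a"] == unix["a"] && dos["b"] == unix["b"]) // true
}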
-func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. 
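The dispatcher below is essentially a one-byte lookahead table: after stream start, directives, and document markers are handled, the first character of the upcoming token selects a fetcher. An illustrative paraphrase of its shape (a toy, not the real API; the deleted code also consults the column, the flow level, and multi-character lookahead):

package main

import "fmt"

// tokenKind mimics the first-byte dispatch of yaml_parser_fetch_next_token
// below. It deliberately ignores the context the real scanner tracks.
func tokenKind(b byte) string {
	switch b {
	case '[':
		return "FLOW-SEQUENCE-START"
	case ']':
		return "FLOW-SEQUENCE-END"
	case '{':
		return "FLOW-MAPPING-START"
	case '}':
		return "FLOW-MAPPING-END"
	case ',':
		return "FLOW-ENTRY"
	case '*':
		return "ALIAS"
	case '&':
		return "ANCHOR"
	case '!':
		return "TAG"
	case '\'', '"':
		return "SCALAR(flow)"
	case '|', '>':
		return "SCALAR(block, in block context)"
	default:
		return "SCALAR(plain) or context-dependent indicator"
	}
}

func main() {
	for _, b := range []byte(`[{,&!|"`) {
		fmt.Printf("%c -> %s\n", b, tokenKind(b))
	}
}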
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' 
{ - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. 
- if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each intendation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. 
- if parser.flow_level > 0 { - return true - } - - // Loop through the intendation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. 
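	// A bracketed collection can itself be a mapping key, as in
	// "[a, b]: value", so the opening indicator is recorded as a
	// potential simple key; if a ':' follows the closing bracket, the
	// KEY token is inserted retroactively at this saved position.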
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. 
- if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not necessarily simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key.
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A quoted scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a plain scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Loop until the next token is found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token.
- } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for a blank character after the name.
- if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. 
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. 
- if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && !(s[0] == '!' && s[1] == 0) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the tag is non-empty. - if len(s) == 0 { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode a URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator.
- increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing up the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF.
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // It is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexadecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character.
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for a tab character that abuses indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check the indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag.
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 5958822f..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,104 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go deleted file mode 100644 index c5cf1ed4..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package yaml_test - -import ( - . 
"gopkg.in/check.v1" - "testing" -) - -func Test(t *testing.T) { TestingT(t) } - -type S struct{} - -var _ = Suite(&S{}) diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index 190362f2..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,89 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - // If the output encoding is UTF-8, we don't need to recode the buffer. - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index e3e01edc..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,344 +0,0 @@ -// Package yaml implements YAML support for the Go language. 
-// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - defer handleErr(&err) - d := newDecoder() - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key.
Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Does not apply to zero valued structs. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the struct it's applied to, so its fields -// are processed as if they were part of the outer -// struct. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int "a,omitempty" -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshal("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - - // Inline holds the field index if the field is part of an inlined struct.
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - // TODO: Implement support for inline maps. - //case reflect.Map: - // if inlineMap >= 0 { - // return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - // } - // if field.Type.Key() != reflect.TypeOf("") { - // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - // } - // inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) 
- } - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField()-1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index d60a6b6b..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,716 +0,0 @@ -package yaml - -import ( - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. 
- column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token.
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
- anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. 
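// These canonical tags drive implicit resolution; from the public API the
// effect is visible as the concrete Go types chosen for untyped values.
// Self-contained check (the document content is invented for the example):
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var doc map[interface{}]interface{}
	if err := yaml.Unmarshal([]byte("a: 123\nb: true\nc: hello\nd: null"), &doc); err != nil {
		panic(err)
	}
	for _, k := range []string{"a", "b", "c", "d"} {
		fmt.Printf("%s -> %T\n", k, doc[k])
	}
	// a -> int    (!!int)
	// b -> bool   (!!bool)
	// c -> string (!!str)
	// d -> <nil>  (!!null)
}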
-type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write no more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
- yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occured. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_file io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. 
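// Hypothetical adapter for the read-handler contract documented above: any
// io.Reader can feed the parser this way (the package installs an equivalent
// handler internally for its inputs; io.EOF marks the end of the stream).
func readHandlerFrom(r io.Reader) yaml_read_handler_t {
	return func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
		// Read up to len(buffer) bytes; n reports how many were written.
		return r.Read(buffer)
	}
}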
- - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. 
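// Hypothetical counterpart for the write-handler prototype above: accumulate
// emitted bytes into an in-memory slice (the package wires up an equivalent
// handler when marshalling to a string).
func writeHandlerTo(out *[]byte) yaml_write_handler_t {
	return func(emitter *yaml_emitter_t, buffer []byte) error {
		*out = append(*out, buffer...)
		return nil // a non-nil error would abort emission
	}
}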
-type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_file io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3c..00000000 --- a/vendor/github.com/docker/distribution/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. 
- // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. 
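// The multi-byte comparisons in the helpers above match the UTF-8 encodings
// of NEL (U+0085), LS (U+2028) and PS (U+2029). A standard-library
// cross-check, for illustration only:
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	for _, r := range []rune{0x85, 0x2028, 0x2029} { // NEL, LS, PS
		buf := make([]byte, utf8.RuneLen(r))
		utf8.EncodeRune(buf, r)
		fmt.Printf("U+%04X -> % X\n", r, buf)
	}
	// U+0085 -> C2 85
	// U+2028 -> E2 80 A8
	// U+2029 -> E2 80 A9
}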
-func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index 0a8d77ae..8b2d8fde 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -28,14 +28,14 @@ ${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry +${PREFIX}/bin/digest: version/version.go $(shell find . -type f -name '*.go') + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest + ${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template -${PREFIX}/bin/dist: version/version.go $(shell find . -type f -name '*.go') - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/dist - docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template ./bin/registry-api-descriptor-template $< > $@ @@ -66,7 +66,7 @@ test-full: @echo "+ $@" @go test ./... 
-binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/registry-api-descriptor-template ${PREFIX}/bin/dist +binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template @echo "+ $@" clean: diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index ce215887..f7521669 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -15,7 +15,6 @@ This repository contains the following components: |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | | **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **dist** | An _experimental_ tool to provide distribution-oriented functionality without the `docker` daemon. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | | **documentation** | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md index cbf53881..9cdfa36c 100644 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -103,20 +103,20 @@ via IRC or the mailing list and we can talk about adding it. The goal here is to make sure that new features go through a rigid design process before landing in the registry. -##### Mirroring and Pull-through Caching +##### Proxying to other Registries -Mirroring and pull-through caching are related but slightly different. We've -adopted the term _mirroring_ to be a proper mirror of a registry, meaning it -has all the content the upstream would have. Providing such mirrors in the -Docker ecosystem is dependent on a solid trust system, which is still in the -works. +A _pull-through caching_ mode exists for the registry, but is restricted from +within the docker client to only mirror the official Docker Hub. This functionality +can be expanded when image provenance has been specified and implemented in the +distribution project. -The more commonly helpful feature is _pull-through caching_, where data is -fetched from an upstream when not available in a local registry instance. +##### Metadata storage -Please see the following issues: - -- https://github.com/docker/distribution/issues/459 +Metadata for the registry is currently stored with the manifest and layer data on +the storage backend. While this is a big win for simplicity and reliably maintaining +state, it comes at the cost of consistency and high latency. The mutable registry +metadata operations should be abstracted behind an API which will allow ACID-compliant +storage systems to handle metadata. ##### Peer to Peer transfer @@ -263,11 +263,5 @@ just the registry. ### Project Planning -Distribution Components map to Docker Platform Releases via the use of labels. Project Pages are used to define the set of features that are included in each Docker Platform Release.
- -| Platform Version | Label | Planning | -|-----------|------|-----| -| Docker 1.6 | [Docker/1.6](https://github.com/docker/distribution/labels/docker%2F1.6) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.6-Project-Page) | -| Docker 1.7| [Docker/1.7](https://github.com/docker/distribution/labels/docker%2F1.7) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.7-Project-Page) | -| Docker 1.8| [Docker/1.8](https://github.com/docker/distribution/labels/docker%2F1.8) | [Project Page](https://github.com/docker/distribution/wiki/docker-1.8-Project-Page) | +An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index 556bf93e..2087d0f9 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -27,9 +27,6 @@ var ( // ErrBlobInvalidLength returned when the blob has an expected length on // commit, meaning mismatched with the descriptor or an invalid value. ErrBlobInvalidLength = errors.New("blob invalid length") - - // ErrUnsupported returned when an unsupported operation is attempted - ErrUnsupported = errors.New("unsupported operation") ) // ErrBlobInvalidDigest returned when digest check fails. diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml index 7bd48373..4716eafa 100644 --- a/vendor/github.com/docker/distribution/circle.yml +++ b/vendor/github.com/docker/distribution/circle.yml @@ -21,7 +21,7 @@ machine: BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_rados include_oss" + DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" # Ceph config diff --git a/vendor/github.com/docker/distribution/cmd/dist/list.go b/vendor/github.com/docker/distribution/cmd/dist/list.go deleted file mode 100644 index e540d4d8..00000000 --- a/vendor/github.com/docker/distribution/cmd/dist/list.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import "github.com/codegangsta/cli" - -var ( - commandList = cli.Command{ - Name: "images", - Usage: "List available images", - Action: imageList, - } -) - -func imageList(c *cli.Context) { -} diff --git a/vendor/github.com/docker/distribution/cmd/dist/main.go b/vendor/github.com/docker/distribution/cmd/dist/main.go deleted file mode 100644 index 34a2b514..00000000 --- a/vendor/github.com/docker/distribution/cmd/dist/main.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import ( - "os" - - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "dist" - app.Usage = "Package and ship Docker content" - - app.Action = commandList.Action - app.Commands = []cli.Command{ - commandList, - commandPull, - commandPush, - } - app.Run(os.Args) -} diff --git a/vendor/github.com/docker/distribution/cmd/dist/pull.go b/vendor/github.com/docker/distribution/cmd/dist/pull.go deleted file mode 100644 index 8f96129c..00000000 --- a/vendor/github.com/docker/distribution/cmd/dist/pull.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - 
-import "github.com/codegangsta/cli" - -var ( - commandPull = cli.Command{ - Name: "pull", - Usage: "Pull and verify an image from a registry", - Action: imagePull, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "r,registry", - Value: "hub.docker.io", - Usage: "Registry to use (e.g.: localhost:5000)", - }, - }, - } -) - -func imagePull(c *cli.Context) { -} diff --git a/vendor/github.com/docker/distribution/cmd/dist/push.go b/vendor/github.com/docker/distribution/cmd/dist/push.go deleted file mode 100644 index c39922aa..00000000 --- a/vendor/github.com/docker/distribution/cmd/dist/push.go +++ /dev/null @@ -1,21 +0,0 @@ -package main - -import "github.com/codegangsta/cli" - -var ( - commandPush = cli.Command{ - Name: "push", - Usage: "Push an image to a registry", - Action: imagePush, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "r,registry", - Value: "hub.docker.io", - Usage: "Registry to use (e.g.: localhost:5000)", - }, - }, - } -) - -func imagePush(*cli.Context) { -} diff --git a/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go deleted file mode 100644 index 05a1b487..00000000 --- a/vendor/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go +++ /dev/null @@ -1,127 +0,0 @@ -// registry-api-descriptor-template uses the APIDescriptor defined in the -// api/v2 package to execute templates passed to the command line. -// -// For example, to generate a new API specification, one would execute the -// following command from the repo root: -// -// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md -// -// The templates are passed in the api/v2.APIDescriptor object. Please see the -// package documentation for fields available on that object. The template -// syntax is from Go's standard library text/template package. For information -// on Go's template syntax, please see golang.org/pkg/text/template. -package main - -import ( - "log" - "net/http" - "os" - "path/filepath" - "regexp" - "text/template" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" -) - -var spaceRegex = regexp.MustCompile(`\n\s*`) - -func main() { - - if len(os.Args) != 2 { - log.Fatalln("please specify a template to execute.") - } - - path := os.Args[1] - filename := filepath.Base(path) - - funcMap := template.FuncMap{ - "removenewlines": func(s string) string { - return spaceRegex.ReplaceAllString(s, " ") - }, - "statustext": http.StatusText, - "prettygorilla": prettyGorillaMuxPath, - } - - tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) - - data := struct { - RouteDescriptors []v2.RouteDescriptor - ErrorDescriptors []errcode.ErrorDescriptor - }{ - RouteDescriptors: v2.APIDescriptor.RouteDescriptors, - ErrorDescriptors: errcode.GetErrorCodeGroup("registry.api.v2"), - } - - if err := tmpl.Execute(os.Stdout, data); err != nil { - log.Fatalln(err) - } -} - -// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux -// route string, making it suitable for documentation. -func prettyGorillaMuxPath(s string) string { - // Stateful parser that removes regular expressions from gorilla - // routes. It correctly handles balanced bracket pairs. 
- - var output string - var label string - var level int - -start: - if s[0] == '{' { - s = s[1:] - level++ - goto capture - } - - output += string(s[0]) - s = s[1:] - - goto end -capture: - switch s[0] { - case '{': - level++ - case '}': - level-- - - if level == 0 { - s = s[1:] - goto label - } - case ':': - s = s[1:] - goto skip - default: - label += string(s[0]) - } - s = s[1:] - goto capture -skip: - switch s[0] { - case '{': - level++ - case '}': - level-- - } - s = s[1:] - - if level == 0 { - goto label - } - - goto skip -label: - if label != "" { - output += "<" + label + ">" - label = "" - } -end: - if s != "" { - goto start - } - - return output - -} diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml b/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml deleted file mode 100644 index 0b524043..00000000 --- a/vendor/github.com/docker/distribution/cmd/registry/config-cache.yml +++ /dev/null @@ -1,48 +0,0 @@ -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development -storage: - cache: - blobdescriptor: redis - filesystem: - rootdirectory: /var/lib/registry-cache - maintenance: - uploadpurging: - enabled: false -http: - addr: :5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-8082 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true -proxy: - remoteurl: https://registry-1.docker.io - username: username - password: password diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml b/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml deleted file mode 100644 index 3f4616d8..00000000 --- a/vendor/github.com/docker/distribution/cmd/registry/config-dev.yml +++ /dev/null @@ -1,60 +0,0 @@ -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development - hooks: - - type: mail - disabled: true - levels: - - panic - options: - smtp: - addr: mail.example.com:25 - username: mailuser - password: password - insecure: true - from: sender@example.com - to: - - errors@example.com -storage: - delete: - enabled: true - cache: - blobdescriptor: redis - filesystem: - rootdirectory: /var/lib/registry - maintenance: - uploadpurging: - enabled: false -http: - addr: :5000 - debug: - addr: localhost:5001 -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-5003 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - diff --git a/vendor/github.com/docker/distribution/cmd/registry/config-example.yml b/vendor/github.com/docker/distribution/cmd/registry/config-example.yml deleted file mode 100644 index cb91e63d..00000000 --- a/vendor/github.com/docker/distribution/cmd/registry/config-example.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 0.1 -log: - fields: - service: registry -storage: - cache: - layerinfo: inmemory - 
filesystem: - rootdirectory: /var/lib/registry -http: - addr: :5000 diff --git a/vendor/github.com/docker/distribution/cmd/registry/rados.go b/vendor/github.com/docker/distribution/cmd/registry/rados.go deleted file mode 100644 index e7ea770a..00000000 --- a/vendor/github.com/docker/distribution/cmd/registry/rados.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build include_rados - -package main - -import _ "github.com/docker/distribution/registry/storage/driver/rados" diff --git a/vendor/github.com/docker/distribution/configuration/configuration.go b/vendor/github.com/docker/distribution/configuration/configuration.go deleted file mode 100644 index 502dab3e..00000000 --- a/vendor/github.com/docker/distribution/configuration/configuration.go +++ /dev/null @@ -1,486 +0,0 @@ -package configuration - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strings" - "time" -) - -// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and -// optionally modified by environment variables -type Configuration struct { - // Version is the version which defines the format of the rest of the configuration - Version Version `yaml:"version"` - - // Log supports setting various parameters related to the logging - // subsystem. - Log struct { - // Level is the granularity at which registry operations are logged. - Level Loglevel `yaml:"level"` - - // Formatter overrides the default formatter with another. Options - // include "text", "json" and "logstash". - Formatter string `yaml:"formatter,omitempty"` - - // Fields allows users to specify static string fields to include in - // the logger context. - Fields map[string]interface{} `yaml:"fields,omitempty"` - - // Hooks allows users to configure the log hooks, enabling subsequent - // handling behavior when log messages at the defined levels are emitted. - Hooks []LogHook `yaml:"hooks,omitempty"` - } - - // Loglevel is the level at which registry operations are logged. This is - // deprecated. Please use Log.Level in the future. - Loglevel Loglevel `yaml:"loglevel,omitempty"` - - // Storage is the configuration for the registry's storage driver - Storage Storage `yaml:"storage"` - - // Auth allows configuration of various authorization methods that may be - // used to gate requests. - Auth Auth `yaml:"auth,omitempty"` - - // Middleware lists all middlewares to be used by the registry. - Middleware map[string][]Middleware `yaml:"middleware,omitempty"` - - // Reporting is the configuration for error reporting - Reporting Reporting `yaml:"reporting,omitempty"` - - // HTTP contains configuration parameters for the registry's http - // interface. - HTTP struct { - // Addr specifies the bind address for the registry instance. - Addr string `yaml:"addr,omitempty"` - - // Net specifies the net portion of the bind address. A default empty value means tcp. - Net string `yaml:"net,omitempty"` - - Prefix string `yaml:"prefix,omitempty"` - - // Secret specifies the secret key which HMAC tokens are created with. - Secret string `yaml:"secret,omitempty"` - - // TLS instructs the http server to listen with a TLS configuration. - // This only supports simple TLS configuration with a cert and key. - // Mostly, this is useful for testing situations or simple deployments - // that require TLS. If more complex configurations are required, use - // a proxy or make a proposal to add support here. - TLS struct { - // Certificate specifies the path to an x509 certificate file to - // be used for TLS.
- Certificate string `yaml:"certificate,omitempty"` - - // Key specifies the path to the x509 key file, which should - // contain the private portion for the file specified in - // Certificate. - Key string `yaml:"key,omitempty"` - - // Specifies the CA certs for client authentication - // A file may contain multiple CA certificates encoded as PEM - ClientCAs []string `yaml:"clientcas,omitempty"` - } `yaml:"tls,omitempty"` - - // Debug configures the http debug interface, if specified. This can - // include services such as pprof, expvar and other data that should - // not be exposed externally. Left disabled by default. - Debug struct { - // Addr specifies the bind address for the debug server. - Addr string `yaml:"addr,omitempty"` - } `yaml:"debug,omitempty"` - } `yaml:"http,omitempty"` - - // Notifications specifies configuration about various endpoints to which - // registry events are dispatched. - Notifications Notifications `yaml:"notifications,omitempty"` - - // Redis configures the redis pool available to the registry webapp. - Redis struct { - // Addr specifies the redis instance available to the application. - Addr string `yaml:"addr,omitempty"` - - // Password string to use when making a connection. - Password string `yaml:"password,omitempty"` - - // DB specifies the database to connect to on the redis instance. - DB int `yaml:"db,omitempty"` - - DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect - ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data - WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data - - // Pool configures the behavior of the redis connection pool. - Pool struct { - // MaxIdle sets the maximum number of idle connections. - MaxIdle int `yaml:"maxidle,omitempty"` - - // MaxActive sets the maximum number of connections that should be - // opened before blocking a connection request. - MaxActive int `yaml:"maxactive,omitempty"` - - // IdleTimeout sets the amount of time to wait before closing - // inactive connections. - IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` - } `yaml:"pool,omitempty"` - } `yaml:"redis,omitempty"` - - Proxy Proxy `yaml:"proxy,omitempty"` -} - -// LogHook is composed of hook Level and Type. -// Once hooks are configured, subsequent handling can run automatically -// when log messages at the defined levels are emitted. -// Example: a hook can send an email notification when an error is logged in the app. -type LogHook struct { - // Disabled lets the user choose whether to enable the hook. - Disabled bool `yaml:"disabled,omitempty"` - - // Type allows the user to select which type of hook handler they want. - Type string `yaml:"type,omitempty"` - - // Levels sets which log message levels will trigger the hook. - Levels []string `yaml:"levels,omitempty"` - - // MailOptions allows the user to configure email parameters. - MailOptions MailOptions `yaml:"options,omitempty"` -} - -// MailOptions provides the configuration sections for the mail handler. -type MailOptions struct { - SMTP struct { - // Addr defines the smtp host address - Addr string `yaml:"addr,omitempty"` - - // Username defines the user name for the smtp host - Username string `yaml:"username,omitempty"` - - // Password defines the password of the login user - Password string `yaml:"password,omitempty"` - - // Insecure defines whether smtp login skips secure certificate verification.
- Insecure bool `yaml:"insecure,omitempty"` - } `yaml:"smtp,omitempty"` - - // From defines mail sending address - From string `yaml:"from,omitempty"` - - // To defines mail receiving address - To []string `yaml:"to,omitempty"` -} - -// v0_1Configuration is a Version 0.1 Configuration struct -// This is currently aliased to Configuration, as it is the current version -type v0_1Configuration Configuration - -// UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints -func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { - var versionString string - err := unmarshal(&versionString) - if err != nil { - return err - } - - newVersion := Version(versionString) - if _, err := newVersion.major(); err != nil { - return err - } - - if _, err := newVersion.minor(); err != nil { - return err - } - - *version = newVersion - return nil -} - -// CurrentVersion is the most recent Version that can be parsed -var CurrentVersion = MajorMinorVersion(0, 1) - -// Loglevel is the level at which operations are logged -// This can be error, warn, info, or debug -type Loglevel string - -// UnmarshalYAML implements the yaml.Umarshaler interface -// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a -// valid loglevel -func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error { - var loglevelString string - err := unmarshal(&loglevelString) - if err != nil { - return err - } - - loglevelString = strings.ToLower(loglevelString) - switch loglevelString { - case "error", "warn", "info", "debug": - default: - return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString) - } - - *loglevel = Loglevel(loglevelString) - return nil -} - -// Parameters defines a key-value parameters mapping -type Parameters map[string]interface{} - -// Storage defines the configuration for registry object storage -type Storage map[string]Parameters - -// Type returns the storage driver type, such as filesystem or s3 -func (storage Storage) Type() string { - // Return only key in this map - for k := range storage { - switch k { - case "maintenance": - // allow configuration of maintenance - case "cache": - // allow configuration of caching - case "delete": - // allow configuration of delete - case "redirect": - // allow configuration of redirect - default: - return k - } - } - return "" -} - -// Parameters returns the Parameters map for a Storage configuration -func (storage Storage) Parameters() Parameters { - return storage[storage.Type()] -} - -// setParameter changes the parameter at the provided key to the new value -func (storage Storage) setParameter(key string, value interface{}) { - storage[storage.Type()][key] = value -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters -func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { - var storageMap map[string]Parameters - err := unmarshal(&storageMap) - if err == nil { - if len(storageMap) > 1 { - types := make([]string, 0, len(storageMap)) - for k := range storageMap { - switch k { - case "maintenance": - // allow for configuration of maintenance - case "cache": - // allow configuration of caching - case "delete": - // allow configuration of delete - case "redirect": - // allow configuration of redirect - default: - types = 
append(types, k) - } - } - - if len(types) > 1 { - return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) - } - } - *storage = storageMap - return nil - } - - var storageType string - err = unmarshal(&storageType) - if err == nil { - *storage = Storage{storageType: Parameters{}} - return nil - } - - return err -} - -// MarshalYAML implements the yaml.Marshaler interface -func (storage Storage) MarshalYAML() (interface{}, error) { - if storage.Parameters() == nil { - return storage.Type(), nil - } - return map[string]Parameters(storage), nil -} - -// Auth defines the configuration for registry authorization. -type Auth map[string]Parameters - -// Type returns the auth type, such as silly or token -func (auth Auth) Type() string { - // Return only key in this map - for k := range auth { - return k - } - return "" -} - -// Parameters returns the Parameters map for an Auth configuration -func (auth Auth) Parameters() Parameters { - return auth[auth.Type()] -} - -// setParameter changes the parameter at the provided key to the new value -func (auth Auth) setParameter(key string, value interface{}) { - auth[auth.Type()][key] = value -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters -func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]Parameters - err := unmarshal(&m) - if err == nil { - if len(m) > 1 { - types := make([]string, 0, len(m)) - for k := range m { - types = append(types, k) - } - - // TODO(stevvooe): May want to change this slightly for - // authorization to allow multiple challenges. - return fmt.Errorf("must provide exactly one type. Provided: %v", types) - - } - *auth = m - return nil - } - - var authType string - err = unmarshal(&authType) - if err == nil { - *auth = Auth{authType: Parameters{}} - return nil - } - - return err -} - -// MarshalYAML implements the yaml.Marshaler interface -func (auth Auth) MarshalYAML() (interface{}, error) { - if auth.Parameters() == nil { - return auth.Type(), nil - } - return map[string]Parameters(auth), nil -} - -// Notifications configures multiple http endpoints. -type Notifications struct { - // Endpoints is a list of http configurations for endpoints that - // respond to webhook notifications. In the future, we may allow other - // kinds of endpoints, such as external queues. - Endpoints []Endpoint `yaml:"endpoints,omitempty"` -} - -// Endpoint describes the configuration of an http webhook notification -// endpoint. -type Endpoint struct { - Name string `yaml:"name"` // identifies the endpoint in the registry instance. - Disabled bool `yaml:"disabled"` // disables the endpoint - URL string `yaml:"url"` // post url for the endpoint. - Headers http.Header `yaml:"headers"` // static headers that should be added to all requests - Timeout time.Duration `yaml:"timeout"` // HTTP timeout - Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure - Backoff time.Duration `yaml:"backoff"` // backoff duration -} - -// Reporting defines error reporting methods. -type Reporting struct { - // Bugsnag configures error reporting for Bugsnag (bugsnag.com). - Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` - // NewRelic configures error reporting for NewRelic (newrelic.com) - NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` -} - -// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com).
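// For illustration, the two YAML shapes accepted by the Storage (and Auth)
// unmarshallers above; both decode to the same map-backed type:
//
//	storage: inmemory          # bare string, driver with no parameters
//
//	storage:                   # single driver key with parameters
//	  filesystem:
//	    rootdirectory: /var/lib/registry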
-type BugsnagReporting struct { - // APIKey is the Bugsnag api key. - APIKey string `yaml:"apikey,omitempty"` - // ReleaseStage tracks where the registry is deployed. - // Examples: production, staging, development - ReleaseStage string `yaml:"releasestage,omitempty"` - // Endpoint is used for specifying an enterprise Bugsnag endpoint. - Endpoint string `yaml:"endpoint,omitempty"` -} - -// NewRelicReporting configures error reporting for NewRelic (newrelic.com) -type NewRelicReporting struct { - // LicenseKey is the NewRelic user license key - LicenseKey string `yaml:"licensekey,omitempty"` - // Name is the component name of the registry in NewRelic - Name string `yaml:"name,omitempty"` - // Verbose configures debug output to STDOUT - Verbose bool `yaml:"verbose,omitempty"` -} - -// Middleware configures named middlewares to be applied at injection points. -type Middleware struct { - // Name the middleware registers itself as - Name string `yaml:"name"` - // Flag to disable middleware easily - Disabled bool `yaml:"disabled,omitempty"` - // Map of parameters that will be passed to the middleware's initialization function - Options Parameters `yaml:"options"` -} - -// Proxy configures the registry as a pull through cache -type Proxy struct { - // RemoteURL is the URL of the remote registry - RemoteURL string `yaml:"remoteurl"` - - // Username of the hub user - Username string `yaml:"username"` - - // Password of the hub user - Password string `yaml:"password"` -} - -// Parse parses an input configuration yaml document into a Configuration struct -// This should generally be capable of handling old configuration format versions -// -// Environment variables may be used to override configuration parameters other than version, -// following the scheme below: -// Configuration.Abc may be replaced by the value of REGISTRY_ABC, -// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth -func Parse(rd io.Reader) (*Configuration, error) { - in, err := ioutil.ReadAll(rd) - if err != nil { - return nil, err - } - - p := NewParser("registry", []VersionedParseInfo{ - { - Version: MajorMinorVersion(0, 1), - ParseAs: reflect.TypeOf(v0_1Configuration{}), - ConversionFunc: func(c interface{}) (interface{}, error) { - if v0_1, ok := c.(*v0_1Configuration); ok { - if v0_1.Loglevel == Loglevel("") { - v0_1.Loglevel = Loglevel("info") - } - if v0_1.Storage.Type() == "" { - return nil, fmt.Errorf("No storage configuration provided") - } - return (*Configuration)(v0_1), nil - } - return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) - }, - }, - }) - - config := new(Configuration) - err = p.Parse(in, config) - if err != nil { - return nil, err - } - - return config, nil -} diff --git a/vendor/github.com/docker/distribution/configuration/configuration_test.go b/vendor/github.com/docker/distribution/configuration/configuration_test.go deleted file mode 100644 index 24076e2c..00000000 --- a/vendor/github.com/docker/distribution/configuration/configuration_test.go +++ /dev/null @@ -1,370 +0,0 @@ -package configuration - -import ( - "bytes" - "net/http" - "os" - "testing" - - . 
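
Editor's note: the Parse function defined above is the whole public entry point of this package. A hedged usage sketch, assuming the vendored package is importable as github.com/docker/distribution/configuration and using the `storage: inmemory` shorthand the tests below exercise:

```
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/configuration"
)

func main() {
	yml := `
version: 0.1
loglevel: info
storage: inmemory
`
	// Parse also applies REGISTRY_* environment overrides, per its doc comment.
	config, err := configuration.Parse(strings.NewReader(yml))
	if err != nil {
		panic(err)
	}
	fmt.Println(config.Loglevel, config.Storage.Type()) // info inmemory
}
```
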
"gopkg.in/check.v1" - "gopkg.in/yaml.v2" -) - -// Hook up gocheck into the "go test" runner -func Test(t *testing.T) { TestingT(t) } - -// configStruct is a canonical example configuration, which should map to configYamlV0_1 -var configStruct = Configuration{ - Version: "0.1", - Log: struct { - Level Loglevel `yaml:"level"` - Formatter string `yaml:"formatter,omitempty"` - Fields map[string]interface{} `yaml:"fields,omitempty"` - Hooks []LogHook `yaml:"hooks,omitempty"` - }{ - Fields: map[string]interface{}{"environment": "test"}, - }, - Loglevel: "info", - Storage: Storage{ - "s3": Parameters{ - "region": "us-east-1", - "bucket": "my-bucket", - "rootdirectory": "/registry", - "encrypt": true, - "secure": false, - "accesskey": "SAMPLEACCESSKEY", - "secretkey": "SUPERSECRET", - "host": nil, - "port": 42, - }, - }, - Auth: Auth{ - "silly": Parameters{ - "realm": "silly", - "service": "silly", - }, - }, - Reporting: Reporting{ - Bugsnag: BugsnagReporting{ - APIKey: "BugsnagApiKey", - }, - }, - Notifications: Notifications{ - Endpoints: []Endpoint{ - { - Name: "endpoint-1", - URL: "http://example.com", - Headers: http.Header{ - "Authorization": []string{"Bearer "}, - }, - }, - }, - }, - HTTP: struct { - Addr string `yaml:"addr,omitempty"` - Net string `yaml:"net,omitempty"` - Prefix string `yaml:"prefix,omitempty"` - Secret string `yaml:"secret,omitempty"` - TLS struct { - Certificate string `yaml:"certificate,omitempty"` - Key string `yaml:"key,omitempty"` - ClientCAs []string `yaml:"clientcas,omitempty"` - } `yaml:"tls,omitempty"` - Debug struct { - Addr string `yaml:"addr,omitempty"` - } `yaml:"debug,omitempty"` - }{ - TLS: struct { - Certificate string `yaml:"certificate,omitempty"` - Key string `yaml:"key,omitempty"` - ClientCAs []string `yaml:"clientcas,omitempty"` - }{ - ClientCAs: []string{"/path/to/ca.pem"}, - }, - }, -} - -// configYamlV0_1 is a Version 0.1 yaml document representing configStruct -var configYamlV0_1 = ` -version: 0.1 -log: - fields: - environment: test -loglevel: info -storage: - s3: - region: us-east-1 - bucket: my-bucket - rootdirectory: /registry - encrypt: true - secure: false - accesskey: SAMPLEACCESSKEY - secretkey: SUPERSECRET - host: ~ - port: 42 -auth: - silly: - realm: silly - service: silly -notifications: - endpoints: - - name: endpoint-1 - url: http://example.com - headers: - Authorization: [Bearer ] -reporting: - bugsnag: - apikey: BugsnagApiKey -http: - clientcas: - - /path/to/ca.pem -` - -// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory -// storage driver with no parameters -var inmemoryConfigYamlV0_1 = ` -version: 0.1 -loglevel: info -storage: inmemory -auth: - silly: - realm: silly - service: silly -notifications: - endpoints: - - name: endpoint-1 - url: http://example.com - headers: - Authorization: [Bearer ] -` - -type ConfigSuite struct { - expectedConfig *Configuration -} - -var _ = Suite(new(ConfigSuite)) - -func (suite *ConfigSuite) SetUpTest(c *C) { - os.Clearenv() - suite.expectedConfig = copyConfig(configStruct) -} - -// TestMarshalRoundtrip validates that configStruct can be marshaled and -// unmarshaled without changing any parameters -func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { - configBytes, err := yaml.Marshal(suite.expectedConfig) - c.Assert(err, IsNil) - config, err := Parse(bytes.NewReader(configBytes)) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseSimple validates that configYamlV0_1 can be parsed into a struct -// matching configStruct -func 
(suite *ConfigSuite) TestParseSimple(c *C) { - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseInmemory validates that configuration yaml with storage provided as -// a string can be parsed into a Configuration struct with no storage parameters -func (suite *ConfigSuite) TestParseInmemory(c *C) { - suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} - suite.expectedConfig.Reporting = Reporting{} - suite.expectedConfig.Log.Fields = nil - - config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseIncomplete validates that an incomplete yaml configuration cannot -// be parsed without providing environment variables to fill in the missing -// components. -func (suite *ConfigSuite) TestParseIncomplete(c *C) { - incompleteConfigYaml := "version: 0.1" - _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) - c.Assert(err, NotNil) - - suite.expectedConfig.Log.Fields = nil - suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} - suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} - suite.expectedConfig.Reporting = Reporting{} - suite.expectedConfig.Notifications = Notifications{} - - os.Setenv("REGISTRY_STORAGE", "filesystem") - os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - os.Setenv("REGISTRY_AUTH", "silly") - os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly") - - config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithSameEnvStorage validates that providing environment variables -// that match the given storage type will only include environment-defined -// parameters and remove yaml-defined parameters -func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { - suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} - - os.Setenv("REGISTRY_STORAGE", "s3") - os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change -// and add to the given storage parameters will change and add parameters to the parsed -// Configuration struct -func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { - suite.expectedConfig.Storage.setParameter("region", "us-west-1") - suite.expectedConfig.Storage.setParameter("secure", true) - suite.expectedConfig.Storage.setParameter("newparam", "some Value") - - os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") - os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") - os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvStorageType validates that providing an environment variable that -// changes the storage type will be reflected in the parsed Configuration struct -func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { - suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} - - os.Setenv("REGISTRY_STORAGE", "inmemory") - - config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithExtraneousEnvStorageParams validates that environment variables -// that change parameters out of the scope of the specified storage type are -// ignored. -func (suite *ConfigSuite) TestParseWithExtraneousEnvStorageParams(c *C) { - os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable -// that changes the storage type will be reflected in the parsed Configuration struct and that -// environment storage parameters will also be included -func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { - suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} - suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") - - os.Setenv("REGISTRY_STORAGE", "filesystem") - os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log -// level to the same as the one provided in the yaml will not change the parsed Configuration struct -func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { - os.Setenv("REGISTRY_LOGLEVEL", "info") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the -// log level will override the value provided in the yaml document -func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { - suite.expectedConfig.Loglevel = "error" - - os.Setenv("REGISTRY_LOGLEVEL", "error") - - config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseInvalidLoglevel validates that the parser will fail to parse a -// configuration if the loglevel is malformed -func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { - invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" - _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) - c.Assert(err, NotNil) - - os.Setenv("REGISTRY_LOGLEVEL", "derp") - - _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, NotNil) - -} - -// TestParseWithDifferentEnvReporting validates that environment variables -// properly override reporting parameters -func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { - suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" - suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" - suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" - suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" - - os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") - os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") - os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") - os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") - - config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) - c.Assert(err, IsNil) - c.Assert(config, DeepEquals, suite.expectedConfig) -} - -// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration -// version than the CurrentVersion -func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { - suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) - configBytes, err := yaml.Marshal(suite.expectedConfig) - c.Assert(err, IsNil) - _, err = Parse(bytes.NewReader(configBytes)) - c.Assert(err, NotNil) -} - -func copyConfig(config Configuration) *Configuration { - configCopy := new(Configuration) - - configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) - configCopy.Loglevel = config.Loglevel - configCopy.Log = config.Log - configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) - for k, v := range config.Log.Fields { - configCopy.Log.Fields[k] = v - } - - configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} - for k, v := range config.Storage.Parameters() { - configCopy.Storage.setParameter(k, v) - } - configCopy.Reporting = Reporting{ - Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, - NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, - } - - configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} - for k, v := range config.Auth.Parameters() { - configCopy.Auth.setParameter(k, v) - } - - configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} - for _, v := range config.Notifications.Endpoints { - configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) - } - - return configCopy -} diff --git a/vendor/github.com/docker/distribution/configuration/parser.go b/vendor/github.com/docker/distribution/configuration/parser.go deleted file mode 100644 index 10a0461e..00000000 --- a/vendor/github.com/docker/distribution/configuration/parser.go +++ /dev/null @@ -1,203 +0,0 @@ -package configuration - -import ( - "fmt" - "os" - "reflect" - "regexp" - "strconv" - "strings" - - "gopkg.in/yaml.v2" -) - -// Version is a major/minor version pair of the form Major.Minor -// Major version upgrades indicate structure or type changes -// Minor version upgrades should be strictly additive -type Version string - -// MajorMinorVersion constructs a Version from its Major and Minor components -func MajorMinorVersion(major, minor uint) Version { - return Version(fmt.Sprintf("%d.%d", major, minor)) -} - -func (version Version) major() (uint, error) { - majorPart := strings.Split(string(version), ".")[0] - major, err := strconv.ParseUint(majorPart, 10, 0) - return uint(major), err -} - -// Major returns the major version portion of a Version -func (version Version) Major() uint { - major, _ := version.major() - return major -} - -func (version Version) minor() (uint, error) { - minorPart := strings.Split(string(version), ".")[1] - minor, err := strconv.ParseUint(minorPart, 10, 0) - return uint(minor), err -} - -// Minor returns the minor version portion of a Version -func (version Version) Minor() uint { - minor, _ := version.minor() - return minor -} - -// VersionedParseInfo defines how a specific version of a configuration should -// be parsed into the current version -type VersionedParseInfo struct { - // Version is the version which this parsing information relates to - Version 
Version - // ParseAs defines the type which a configuration file of this version - // should be parsed into - ParseAs reflect.Type - // ConversionFunc defines a method for converting the parsed configuration - // (of type ParseAs) into the current configuration version - // Note: this method signature is very unclear with the absence of generics - ConversionFunc func(interface{}) (interface{}, error) -} - -// Parser can be used to parse a configuration file and environment of a defined -// version into a unified output structure -type Parser struct { - prefix string - mapping map[Version]VersionedParseInfo - env map[string]string -} - -// NewParser returns a *Parser with the given environment prefix which handles -// versioned configurations which match the given parseInfos -func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { - p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo), env: make(map[string]string)} - - for _, parseInfo := range parseInfos { - p.mapping[parseInfo.Version] = parseInfo - } - - for _, env := range os.Environ() { - envParts := strings.SplitN(env, "=", 2) - p.env[envParts[0]] = envParts[1] - } - - return &p -} - -// Parse reads in the given []byte and environment and writes the resulting -// configuration into the input v -// -// Environment variables may be used to override configuration parameters other -// than version, following the scheme below: -// v.Abc may be replaced by the value of PREFIX_ABC, -// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth -func (p *Parser) Parse(in []byte, v interface{}) error { - var versionedStruct struct { - Version Version - } - - if err := yaml.Unmarshal(in, &versionedStruct); err != nil { - return err - } - - parseInfo, ok := p.mapping[versionedStruct.Version] - if !ok { - return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) - } - - parseAs := reflect.New(parseInfo.ParseAs) - err := yaml.Unmarshal(in, parseAs.Interface()) - if err != nil { - return err - } - - err = p.overwriteFields(parseAs, p.prefix) - if err != nil { - return err - } - - c, err := parseInfo.ConversionFunc(parseAs.Interface()) - if err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) - return nil -} - -func (p *Parser) overwriteFields(v reflect.Value, prefix string) error { - for v.Kind() == reflect.Ptr { - v = reflect.Indirect(v) - } - switch v.Kind() { - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - sf := v.Type().Field(i) - fieldPrefix := strings.ToUpper(prefix + "_" + sf.Name) - if e, ok := p.env[fieldPrefix]; ok { - fieldVal := reflect.New(sf.Type) - err := yaml.Unmarshal([]byte(e), fieldVal.Interface()) - if err != nil { - return err - } - v.Field(i).Set(reflect.Indirect(fieldVal)) - } - err := p.overwriteFields(v.Field(i), fieldPrefix) - if err != nil { - return err - } - } - case reflect.Map: - p.overwriteMap(v, prefix) - } - return nil -} - -func (p *Parser) overwriteMap(m reflect.Value, prefix string) error { - switch m.Type().Elem().Kind() { - case reflect.Struct: - for _, k := range m.MapKeys() { - err := p.overwriteFields(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) - if err != nil { - return err - } - } - envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix))) - if err != nil { - return err - } - for key, val := range p.env { - if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil { - mapValue := reflect.New(m.Type().Elem()) - 
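
Editor's note: the overwriteFields/overwriteMap machinery here implements the PREFIX_ABC_XYZ scheme documented on Parse: struct fields are walked with reflection, the field path is uppercased and joined with underscores, and a matching environment variable is YAML-decoded into the field. A trimmed-down sketch of the same idea, handling only nested structs (the vendored parser also handles maps via the regexp shown here):

```
package main

import (
	"fmt"
	"os"
	"reflect"
	"strings"

	"gopkg.in/yaml.v2"
)

// overwrite walks a struct and, for every field, looks up PREFIX_FIELDNAME in
// the environment, overwriting the field by YAML-decoding the variable's value.
func overwrite(v reflect.Value, prefix string) error {
	for i := 0; i < v.NumField(); i++ {
		sf := v.Type().Field(i)
		name := strings.ToUpper(prefix + "_" + sf.Name)
		if env, ok := os.LookupEnv(name); ok {
			field := reflect.New(sf.Type)
			if err := yaml.Unmarshal([]byte(env), field.Interface()); err != nil {
				return err
			}
			v.Field(i).Set(reflect.Indirect(field))
		}
		if v.Field(i).Kind() == reflect.Struct {
			if err := overwrite(v.Field(i), name); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	var cfg struct {
		Loglevel string
		HTTP     struct{ Addr string }
	}
	os.Setenv("REGISTRY_LOGLEVEL", "debug")
	os.Setenv("REGISTRY_HTTP_ADDR", ":5000")
	if err := overwrite(reflect.ValueOf(&cfg).Elem(), "REGISTRY"); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Loglevel:debug HTTP:{Addr::5000}}
}
```
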
err := yaml.Unmarshal([]byte(val), mapValue.Interface()) - if err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue)) - } - } - case reflect.Map: - for _, k := range m.MapKeys() { - err := p.overwriteMap(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) - if err != nil { - return err - } - } - default: - envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix))) - if err != nil { - return err - } - - for key, val := range p.env { - if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil { - mapValue := reflect.New(m.Type().Elem()) - err := yaml.Unmarshal([]byte(val), mapValue.Interface()) - if err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue)) - } - } - } - return nil -} diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go index a63989e5..6fe1f817 100644 --- a/vendor/github.com/docker/distribution/context/doc.go +++ b/vendor/github.com/docker/distribution/context/doc.go @@ -3,6 +3,19 @@ // logging relevent request information but this package is not limited to // that purpose. // +// The easiest way to get started is to get the background context: +// +// ctx := context.Background() +// +// The returned context should be passed around your application and be the +// root of all other context instances. If the application has a version, this +// line should be called before anything else: +// +// ctx := context.WithVersion(context.Background(), version) +// +// The above will store the version in the context and will be available to +// the logger. +// // Logging // // The most useful aspect of this package is GetLogger. This function takes diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go index f61e3bc2..2cb1d041 100644 --- a/vendor/github.com/docker/distribution/context/http.go +++ b/vendor/github.com/docker/distribution/context/http.go @@ -103,17 +103,21 @@ func GetRequestID(ctx Context) string { // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { - closeNotifier, ok := w.(http.CloseNotifier) - if !ok { - panic("the ResponseWriter does not implement CloseNotifier") - } - irw := &instrumentedResponseWriter{ + irw := instrumentedResponseWriter{ ResponseWriter: w, - CloseNotifier: closeNotifier, Context: ctx, } - return irw, irw + if closeNotifier, ok := w.(http.CloseNotifier); ok { + irwCN := &instrumentedResponseWriterCN{ + instrumentedResponseWriter: irw, + CloseNotifier: closeNotifier, + } + + return irwCN, irwCN + } + + return &irw, &irw } // GetResponseWriter returns the http.ResponseWriter from the provided @@ -263,11 +267,19 @@ func (ctx *muxVarsContext) Value(key interface{}) interface{} { return ctx.Context.Value(key) } +// instrumentedResponseWriterCN provides response writer information in a +// context. It implements http.CloseNotifier so that users can detect +// early disconnects. +type instrumentedResponseWriterCN struct { + instrumentedResponseWriter + http.CloseNotifier +} + // instrumentedResponseWriter provides response writer information in a -// context. +// context. 
This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. type instrumentedResponseWriter struct { http.ResponseWriter - http.CloseNotifier Context mu sync.Mutex @@ -340,3 +352,13 @@ func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { fallback: return irw.Context.Value(key) } + +func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + } + + return irw.instrumentedResponseWriter.Value(key) +} diff --git a/vendor/github.com/docker/distribution/context/http_test.go b/vendor/github.com/docker/distribution/context/http_test.go index ae88a314..3d4b3c8e 100644 --- a/vendor/github.com/docker/distribution/context/http_test.go +++ b/vendor/github.com/docker/distribution/context/http_test.go @@ -110,13 +110,6 @@ func (trw *testResponseWriter) Header() http.Header { return trw.header } -// CloseNotify is only here to make the testResponseWriter implement the -// http.CloseNotifier interface, which WithResponseWriter expects to be -// implemented. -func (trw *testResponseWriter) CloseNotify() <-chan bool { - return make(chan bool) -} - func (trw *testResponseWriter) Write(p []byte) (n int, err error) { if trw.status == 0 { trw.status = http.StatusOK diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go index 78e4212a..fbb6a051 100644 --- a/vendor/github.com/docker/distribution/context/logger.go +++ b/vendor/github.com/docker/distribution/context/logger.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/Sirupsen/logrus" + "runtime" ) // Logger provides a leveled-logging interface. @@ -54,8 +55,14 @@ func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{} // GetLoggerWithFields returns a logger instance with the specified fields // without affecting the context. Extra specified keys will be resolved from // the context. -func GetLoggerWithFields(ctx Context, fields map[string]interface{}, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...).WithFields(logrus.Fields(fields)) +func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { + // must convert from interface{} -> interface{} to string -> interface{} for logrus. + lfields := make(logrus.Fields, len(fields)) + for key, value := range fields { + lfields[fmt.Sprint(key)] = value + } + + return getLogrusLogger(ctx, keys...).WithFields(lfields) } // GetLogger returns the logger from the current context, if present. If one @@ -84,12 +91,20 @@ func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { } if logger == nil { + fields := logrus.Fields{} + + // Fill in the instance id, if we have it. + instanceID := ctx.Value("instance.id") + if instanceID != nil { + fields["instance.id"] = instanceID + } + + fields["go.version"] = runtime.Version() // If no logger is found, just return the standard logger. 
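
Editor's note: the WithResponseWriter change above swaps a hard panic for graceful degradation: the writer is wrapped once, and a CloseNotifier-capable variant is handed out only when the underlying ResponseWriter actually implements that optional interface. A self-contained sketch of the pattern, with illustrative type names:

```
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// base wraps an http.ResponseWriter without assuming any optional interfaces.
type base struct{ http.ResponseWriter }

// baseCN additionally exposes http.CloseNotifier, and is only handed out when
// the wrapped writer really implements it.
type baseCN struct {
	base
	http.CloseNotifier
}

// wrap returns a writer that satisfies http.CloseNotifier only if w does,
// instead of panicking when it does not (the behavior removed above).
func wrap(w http.ResponseWriter) http.ResponseWriter {
	b := base{ResponseWriter: w}
	if cn, ok := w.(http.CloseNotifier); ok {
		return &baseCN{base: b, CloseNotifier: cn}
	}
	return &b
}

func main() {
	// httptest.ResponseRecorder does not implement CloseNotifier.
	_, ok := wrap(httptest.NewRecorder()).(http.CloseNotifier)
	fmt.Println(ok) // false
}
```

This is why the test file below drops its dummy CloseNotify method: the wrapper no longer requires it.
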
- logger = logrus.NewEntry(logrus.StandardLogger()) + logger = logrus.StandardLogger().WithFields(fields) } fields := logrus.Fields{} - - for _, key := range keys { v := ctx.Value(key) if v != nil { diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go index c0aff00d..299edc00 100644 --- a/vendor/github.com/docker/distribution/context/util.go +++ b/vendor/github.com/docker/distribution/context/util.go @@ -20,7 +20,7 @@ func Since(ctx Context, key interface{}) time.Duration { // GetStringValue returns a string value from the context. The empty string // will be returned if not found. -func GetStringValue(ctx Context, key string) (value string) { +func GetStringValue(ctx Context, key interface{}) (value string) { stringi := ctx.Value(key) if stringi != nil { if valuev, ok := stringi.(string); ok { diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go new file mode 100644 index 00000000..746cda02 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/version.go @@ -0,0 +1,16 @@ +package context + +// WithVersion stores the application version in the context. The new context +// gets a logger to ensure log messages are marked with the application +// version. +func WithVersion(ctx Context, version string) Context { + ctx = WithValue(ctx, "version", version) + // push a new logger onto the stack + return WithLogger(ctx, GetLogger(ctx, "version")) +} + +// GetVersion returns the application version from the context. An empty +// string may be returned if the version was not set on the context. +func GetVersion(ctx Context) string { + return GetStringValue(ctx, "version") +} diff --git a/vendor/github.com/docker/distribution/context/version_test.go b/vendor/github.com/docker/distribution/context/version_test.go new file mode 100644 index 00000000..b8165269 --- /dev/null +++ b/vendor/github.com/docker/distribution/context/version_test.go @@ -0,0 +1,19 @@ +package context + +import "testing" + +func TestVersionContext(t *testing.T) { + ctx := Background() + + if GetVersion(ctx) != "" { + t.Fatalf("context should not yet have a version") + } + + expected := "2.1-whatever" + ctx = WithVersion(ctx, expected) + version := GetVersion(ctx) + + if version != expected { + t.Fatalf("version was not set: %q != %q", version, expected) + } +} diff --git a/vendor/github.com/docker/distribution/contrib/apache/README.MD b/vendor/github.com/docker/distribution/contrib/apache/README.MD deleted file mode 100644 index f7e14b5b..00000000 --- a/vendor/github.com/docker/distribution/contrib/apache/README.MD +++ /dev/null @@ -1,36 +0,0 @@ -# Apache HTTPd sample for Registry v1, v2 and mirror - -3 containers involved - -* Docker Registry v1 (registry 0.9.1) -* Docker Registry v2 (registry 2.0.0) -* Docker Registry v1 in mirror mode - -HTTP for mirror and HTTPS for v1 & v2 - -* http://registry.example.com proxify Docker Registry 1.0 in Mirror mode -* https://registry.example.com proxify Docker Registry 1.0 or 2.0 in Hosting mode - -## 3 Docker containers should be started - -* Docker Registry 1.0 in Mirror mode : port 5001 -* Docker Registry 1.0 in Hosting mode : port 5000 -* Docker Registry 2.0 in Hosting mode : port 5002 - -### Registry v1 - - docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1" - -### Mirror - - docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e 
MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \ - -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1" - -### Registry v2 - - docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2.0" - -# For Hosting mode access - -* users should have account (valid-user) to be able to fetch images -* only users using account docker-deployer will be allowed to push images diff --git a/vendor/github.com/docker/distribution/contrib/apache/apache.conf b/vendor/github.com/docker/distribution/contrib/apache/apache.conf deleted file mode 100644 index 3300a7c0..00000000 --- a/vendor/github.com/docker/distribution/contrib/apache/apache.conf +++ /dev/null @@ -1,127 +0,0 @@ -# -# Sample Apache 2.x configuration where : -# - - - - ServerName registry.example.com - ServerAlias www.registry.example.com - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd errors messages) - ProxyPass /error/ ! - - ProxyPass /_ping http://localhost:5001/_ping - ProxyPassReverse /_ping http://localhost:5001/_ping - - ProxyPass /v1 http://localhost:5001/v1 - ProxyPassReverse /v1 http://localhost:5001/v1 - - # Logs - ErrorLog ${APACHE_LOG_DIR}/mirror_error_log - CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog - - - - - - - ServerName registry.example.com - ServerAlias www.registry.example.com - - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt - SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key - - # Higher Strength SSL Ciphers - SSLProtocol all -SSLv2 -SSLv3 -TLSv1 - SSLCipherSuite RC4-SHA:HIGH - SSLHonorCipherOrder on - - # Logs - ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log - CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog - - Header always set "Docker-Distribution-Api-Version" "registry/2.0" - Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" - RequestHeader set X-Forwarded-Proto "https" - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd errors messages) - ProxyPass /error/ ! - - # - # Registry v1 - # - - ProxyPass /v1 http://localhost:5000/v1 - ProxyPassReverse /v1 http://localhost:5000/v1 - - ProxyPass /_ping http://localhost:5000/_ping - ProxyPassReverse /_ping http://localhost:5000/_ping - - # Authentication require for push - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" - - # Read access to authentified users - - Require valid-user - - - # Write access to docker-deployer account only - - Require user docker-deployer - - - - - # Allow ping to run unauthenticated. - - Satisfy any - Allow from all - - - # Allow ping to run unauthenticated. 
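
Editor's note: the Apache virtual-host sample being removed here boils down to a reverse proxy that forwards /v1 and /v2 to the respective registry backends while fixing up forwarding headers and advertising Docker-Distribution-Api-Version. A rough Go equivalent for the v2 half, assuming a registry on localhost:5002 as in the sample's port layout:

```
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// v2 registry backend, matching the sample's port layout.
	target, err := url.Parse("http://localhost:5002")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)

	http.Handle("/v2/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Mirror the header fix-ups from the Apache/nginx configs; "https"
		// assumes TLS is terminated at this proxy, as in the sample.
		r.Header.Set("X-Forwarded-Proto", "https")
		w.Header().Set("Docker-Distribution-Api-Version", "registry/2.0")
		proxy.ServeHTTP(w, r)
	}))
	log.Fatal(http.ListenAndServe(":5000", nil))
}
```
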
- - Satisfy any - Allow from all - - - # - # Registry v2 - # - - ProxyPass /v2 http://localhost:5002/v2 - ProxyPassReverse /v2 http://localhost:5002/v2 - - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" - - # Read access to authentified users - - Require valid-user - - - # Write access to docker-deployer only - - Require user docker-deployer - - - - - - - diff --git a/vendor/github.com/docker/distribution/contrib/ceph/ci-setup.sh b/vendor/github.com/docker/distribution/contrib/ceph/ci-setup.sh deleted file mode 100755 index d907cf5c..00000000 --- a/vendor/github.com/docker/distribution/contrib/ceph/ci-setup.sh +++ /dev/null @@ -1,119 +0,0 @@ -#! /bin/bash -# -# Ceph cluster setup in Circle CI -# - -set -x -set -e -set -u - -NODE=$(hostname) -CEPHDIR=/tmp/ceph - -mkdir cluster -pushd cluster - -# Install -retries=0 -until [ $retries -ge 5 ]; do - pip install ceph-deploy && break - retries=$[$retries+1] - sleep 30 -done - -retries=0 -until [ $retries -ge 5 ]; do - ceph-deploy install --release hammer $NODE && break - retries=$[$retries+1] - sleep 30 -done - -retries=0 -until [ $retries -ge 5 ]; do - ceph-deploy pkg --install librados-dev $NODE && break - retries=$[$retries+1] - sleep 30 -done - -echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts -ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" -cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys -ssh-keyscan $NODE >> ~/.ssh/known_hosts -ceph-deploy new $NODE - -cat >> ceph.conf < 74acc70fa106 - Removing intermediate container edb84c2b40cb - Successfully built 74acc70fa106 - - The commmand outputs its progress until it completes. - -4. Start your configuration with compose. - - $ docker-compose up - Recreating compose_registryv1_1... - Recreating compose_registryv2_1... - Recreating compose_nginx_1... - Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 - ... - - -5. In another terminal, display the running configuration. - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 - 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 - aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 - -### Explore a bit - -1. Check for TLS on your `nginx` server. - - $ curl -v https://localhost:5000 - * Rebuilt URL to: https://localhost:5000/ - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 5000 (#0) - * successfully set certificate verify locations: - * CAfile: none - CApath: /etc/ssl/certs - * SSLv3, TLS handshake, Client hello (1): - * SSLv3, TLS handshake, Server hello (2): - * SSLv3, TLS handshake, CERT (11): - * SSLv3, TLS alert, Server hello (2): - * SSL certificate problem: self signed certificate - * Closing connection 0 - curl: (60) SSL certificate problem: self signed certificate - More details here: http://curl.haxx.se/docs/sslcerts.html - -2. Tag the `v1` registry image. - - $ docker tag registry:latest localhost:5000/registry_one:latest - -2. Push it to the localhost. - - $ docker push localhost:5000/registry_one:latest - - If you are using the 1.6 Docker client, this pushes the image the `v2 `registry. - -4. Use `curl` to list the image in the registry. 
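
Editor's note: the curl transcript in step 4 below queries the v2 tags/list endpoint. The same request from Go, assuming the host port 32777 that `docker ps` reported above for the v2 registry, might look like:

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// tagList mirrors the JSON body returned by GET /v2/<name>/tags/list.
type tagList struct {
	Name string   `json:"name"`
	Tags []string `json:"tags"`
}

func main() {
	// Port 32777 is the port docker assigned in the `docker ps` output above;
	// adjust it to your own mapping.
	resp, err := http.Get("http://localhost:32777/v2/registry1/tags/list")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var tl tagList
	if err := json.NewDecoder(resp.Body).Decode(&tl); err != nil {
		panic(err)
	}
	fmt.Println(tl.Name, tl.Tags) // registry1 [latest]
}
```
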
- - $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 32777 (#0) - > GET /v2/registry1/tags/list HTTP/1.1 - > User-Agent: curl/7.36.0 - > Host: localhost:32777 - > Accept: */* - > - < HTTP/1.1 200 OK - < Content-Type: application/json; charset=utf-8 - < Docker-Distribution-Api-Version: registry/2.0 - < Date: Tue, 14 Apr 2015 22:34:13 GMT - < Content-Length: 39 - < - {"name":"registry1","tags":["latest"]} - * Connection #0 to host localhost left intact - - This example refers to the specific port assigned to the 2.0 registry. You saw - this port earlier, when you used `docker ps` to show your running containers. - - diff --git a/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml b/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml deleted file mode 100644 index 5cd04858..00000000 --- a/vendor/github.com/docker/distribution/contrib/compose/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -nginx: - build: "nginx" - ports: - - "5000:5000" - links: - - registryv1:registryv1 - - registryv2:registryv2 -registryv1: - image: registry - ports: - - "5000" -registryv2: - build: "../../" - ports: - - "5000" diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile b/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile deleted file mode 100644 index 2b252ec7..00000000 --- a/vendor/github.com/docker/distribution/contrib/compose/nginx/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM nginx:1.7 - -COPY nginx.conf /etc/nginx/nginx.conf -COPY registry.conf /etc/nginx/conf.d/registry.conf -COPY docker-registry.conf /etc/nginx/docker-registry.conf -COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf deleted file mode 100644 index 65c4d776..00000000 --- a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf +++ /dev/null @@ -1,6 +0,0 @@ -proxy_pass http://docker-registry-v2; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf deleted file mode 100644 index 7b039a54..00000000 --- a/vendor/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf +++ /dev/null @@ -1,7 +0,0 @@ -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line -proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf deleted file mode 100644 index 63cd180d..00000000 --- a/vendor/github.com/docker/distribution/contrib/compose/nginx/nginx.conf +++ /dev/null @@ -1,27 
+0,0 @@ -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} - diff --git a/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf b/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf deleted file mode 100644 index 47ffd237..00000000 --- a/vendor/github.com/docker/distribution/contrib/compose/nginx/registry.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Docker registry proxy for api versions 1 and 2 - -upstream docker-registry { - server registryv1:5000; -} - -upstream docker-registry-v2 { - server registryv2:5000; -} - -# No client auth or TLS -server { - listen 5000; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - # To add basic authentication to v2 use auth_basic setting plus add_header - # auth_basic "registry.localhost"; - # auth_basic_user_file test.password; - # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; - - include docker-registry-v2.conf; - } - - location / { - include docker-registry.conf; - } -} - diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile deleted file mode 100644 index 6061e99e..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM debian:jessie - -MAINTAINER Docker Distribution Team - -# compile and runtime deps -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - # For DIND - ca-certificates \ - curl \ - iptables \ - procps \ - e2fsprogs \ - xz-utils \ - # For build - build-essential \ - file \ - git \ - net-tools \ - && apt-get clean && rm -rf /var/lib/apt/lists/* - -# Install Docker -ENV VERSION 1.7.1 -RUN curl -L -o /usr/local/bin/docker https://test.docker.com/builds/Linux/x86_64/docker-${VERSION} \ - && chmod +x /usr/local/bin/docker - -# Install DIND -RUN curl -L -o /dind https://raw.githubusercontent.com/docker/docker/master/hack/dind \ - && chmod +x /dind - -# Install bats -RUN cd /usr/local/src/ \ - && git clone https://github.com/sstephenson/bats.git \ - && cd bats \ - && ./install.sh /usr/local - -# Install docker-compose -RUN curl -L https://github.com/docker/compose/releases/download/1.3.3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose \ - && chmod +x /usr/local/bin/docker-compose - -RUN mkdir -p /go/src/github.com/docker/distribution -WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration - -VOLUME /var/lib/docker - -ENTRYPOINT ["/dind"] diff 
--git a/vendor/github.com/docker/distribution/contrib/docker-integration/README.md b/vendor/github.com/docker/distribution/contrib/docker-integration/README.md deleted file mode 100644 index e12bec1a..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Docker Registry Integration Testing - -These integration tests cover interactions between the Docker daemon and the -registry server. All tests are run using the docker cli. - -The compose configuration is intended to setup a testing environment for Docker -using multiple registry configurations. These configurations include different -combinations of a v1 and v2 registry as well as TLS configurations. - -## Running inside of Docker -### Get integration container -The container image to run the integation tests will need to be pulled or built -locally. - -*Building locally* -``` -$ docker build -t distribution/docker-integration . -``` - -### Run script - -Invoke the tests within Docker through the `run.sh` script. - -``` -$ ./run.sh -``` - -Run with aufs driver and tmp volume -**NOTE: Using a volume will prevent multiple runs from needing to -re-pull images** -``` -$ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh -``` - -### Example developer flow - -These tests are useful for developing both as a registry and docker -core developer. The following setup may be used to do integration -testing between development versions - -Insert into your `.zshrc` or `.bashrc` - -``` -# /usr/lib/docker for Docker-in-Docker -# Set this directory to make each invocation run much faster, without -# the need to repull images. -export DOCKER_VOLUME=$HOME/.docker-test-volume - -# Use overlay for all Docker testing, try aufs if overlay not supported -export DOCKER_GRAPHDRIVER=overlay - -# Name this according to personal preference -function rdtest() { - if [ "$1" != "" ]; then - DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker - if [ ! -f $DOCKER_BINARY ]; then - current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION` - echo "$DOCKER_BINARY does not exist" - echo "Current checked out docker version: $current_version" - echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker" - return 1 - fi - fi - - $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh -} -``` - -Run with Docker release version -``` -$ rdtest -``` - -Run using local development version of docker -``` -$ cd $GOPATH/src/github.com/docker/docker -$ make binary -$ rdtest `cat VERSION` -``` - -## Running manually outside of Docker - -### Install Docker Compose - -[Docker Compose Installation Guide](http://docs.docker.com/compose/install/) - -### Start compose setup -``` -docker-compose up -``` - -### Install Certificates -The certificates must be installed in /etc/docker/cert.d in order to use TLS -client auth and use the CA certificate. -``` -sudo sh ./install_certs.sh -``` - -### Test with Docker -Tag an image as with any other private registry. Attempt to push the image. 
- -``` -docker pull hello-world -docker tag hello-world localhost:5440/hello-world -docker push localhost:5440/hello-world - -docker tag hello-world localhost:5441/hello-world -docker push localhost:5441/hello-world -# Perform login using user `testuser` and password `passpassword` -``` - -### Set /etc/hosts entry -Find the non-localhost ip address of local machine - -### Run bats -Run the bats tests after updating /etc/hosts, installing the certificates, and -running the `docker-compose` script. -``` -bats -p . -``` - -## Configurations - -Port | V2 | V1 | TLS | Authentication ---- | --- | --- | --- | --- -5000 | yes | yes | no | none -5001 | no | yes | no | none -5002 | yes | no | no | none -5011 | no | yes | yes | none -5440 | yes | yes | yes | none -5441 | yes | yes | yes | basic (testuser/passpassword) -5442 | yes | yes | yes | TLS client -5443 | yes | yes | yes | TLS client (no CA) -5444 | yes | yes | yes | TLS client + basic (testuser/passpassword) -5445 | yes | yes | yes (no CA) | none -5446 | yes | yes | yes (no CA) | basic (testuser/passpassword) -5447 | yes | yes | yes (no CA) | TLS client -5448 | yes | yes | yes (SSLv3) | none diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml deleted file mode 100644 index d664c7bd..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml +++ /dev/null @@ -1,27 +0,0 @@ -nginx: - build: "nginx" - ports: - - "5000:5000" - - "5001:5001" - - "5002:5002" - - "5011:5011" - - "5440:5440" - - "5441:5441" - - "5442:5442" - - "5443:5443" - - "5444:5444" - - "5445:5445" - - "5446:5446" - - "5447:5447" - - "5448:5448" - links: - - registryv1:registryv1 - - registryv2:registryv2 -registryv1: - image: registry:0.9.1 - ports: - - "5000" -registryv2: - build: "../../" - ports: - - "5000" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/install_certs.sh b/vendor/github.com/docker/distribution/contrib/docker-integration/install_certs.sh deleted file mode 100644 index c1fa2b20..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/install_certs.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -set -e - -hostname=$1 -if [ "$hostname" = "" ]; then - hostname="localhost" -fi - -mkdir -p /etc/docker/certs.d/$hostname:5011 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5011/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5440 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5440/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5441 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5441/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5442 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5442/ca.crt -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5442/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5442/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5443 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5443/ca.crt -cp ./nginx/ssl/registry-noca+client-cert.pem /etc/docker/certs.d/$hostname:5443/client.cert -cp ./nginx/ssl/registry-noca+client-key.pem /etc/docker/certs.d/$hostname:5443/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5444 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5444/ca.crt -cp ./nginx/ssl/registry-ca+client-cert.pem 
/etc/docker/certs.d/$hostname:5444/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5444/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5447 -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5447/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5447/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5448 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5448/ca.crt diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile deleted file mode 100644 index 04515e8c..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM nginx:1.9 - -COPY nginx.conf /etc/nginx/nginx.conf -COPY registry.conf /etc/nginx/conf.d/registry.conf -COPY docker-registry.conf /etc/nginx/docker-registry.conf -COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf -COPY registry-noauth.conf /etc/nginx/registry-noauth.conf -COPY registry-basic.conf /etc/nginx/registry-basic.conf -COPY test.passwd /etc/nginx/test.passwd -COPY ssl /etc/nginx/ssl diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf deleted file mode 100644 index 65c4d776..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf +++ /dev/null @@ -1,6 +0,0 @@ -proxy_pass http://docker-registry-v2; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf deleted file mode 100644 index 5b1a2d58..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf +++ /dev/null @@ -1,7 +0,0 @@ -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 -proxy_read_timeout 900; diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf deleted file mode 100644 index 63cd180d..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf +++ /dev/null @@ -1,27 +0,0 @@ -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - 
keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} - diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf deleted file mode 100644 index 3c629ae8..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf +++ /dev/null @@ -1,13 +0,0 @@ -client_max_body_size 0; -chunked_transfer_encoding on; -location /v2/ { - auth_basic "registry.localhost"; - auth_basic_user_file test.passwd; - add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; - include docker-registry-v2.conf; -} -location / { - auth_basic "registry.localhost"; - auth_basic_user_file test.passwd; - include docker-registry.conf; -} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf deleted file mode 100644 index 883a2d48..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf +++ /dev/null @@ -1,8 +0,0 @@ -client_max_body_size 0; -chunked_transfer_encoding on; -location /v2/ { - include docker-registry-v2.conf; -} -location / { - include docker-registry.conf; -} diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf deleted file mode 100644 index b402eacb..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf +++ /dev/null @@ -1,277 +0,0 @@ -# Docker registry proxy for api versions 1 and 2 - -upstream docker-registry { - server registryv1:5000; -} - -upstream docker-registry-v2 { - server registryv2:5000; -} - -# No client auth or TLS -server { - listen 5000; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - include docker-registry-v2.conf; - } - - location / { - include docker-registry.conf; - } -} - -# No client auth or TLS (V1 Only) -server { - listen 5001; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location / { - include docker-registry.conf; - } -} - -# No client auth or TLS (V2 Only) -server { - listen 5002; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location / { - include docker-registry-v2.conf; - } -} - -# TLS localhost (V1 Only) -server { - listen 5011; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - - client_max_body_size 0; - chunked_transfer_encoding on; - 
location / { - include docker-registry.conf; - } -} - -# TLS localregistry (V1 Only) -server { - listen 5011; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - - client_max_body_size 0; - chunked_transfer_encoding on; - location / { - include docker-registry.conf; - } -} - - - -# TLS Configuration chart -# Username/Password: testuser/passpassword -# | ca | client | basic | notes -# 5440 | yes | no | no | Tests CA certificate -# 5441 | yes | no | yes | Tests basic auth over TLS -# 5442 | yes | yes | no | Tests client auth with client CA -# 5443 | yes | yes | no | Tests client auth without client CA -# 5444 | yes | yes | yes | Tests using basic auth + tls auth -# 5445 | no | no | no | Tests insecure using TLS -# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS -# 5447 | no | yes | no | Tests client auth to insecure -# 5448 | yes | no | no | Bad SSL version - -server { - listen 5440; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - include registry-noauth.conf; -} - -server { - listen 5441; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - include registry-basic.conf; -} - -server { - listen 5442; - listen 5443; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5444; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-basic.conf; -} - -server { - listen 5445; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - include registry-noauth.conf; -} - -server { - listen 5446; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - include registry-basic.conf; -} - -server { - listen 5447; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5448; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_protocols SSLv3; - include registry-noauth.conf; -} - -# Add configuration for localregistry server_name -# Requires configuring /etc/hosts to use -# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing -# Docker secure/insecure registry features -server { - listen 5440; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key 
/etc/nginx/ssl/registry-ca+localregistry-key.pem; - include registry-noauth.conf; -} - -server { - listen 5441; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - include registry-basic.conf; -} - -server { - listen 5442; - listen 5443; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5444; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-basic.conf; -} - -server { - listen 5445; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - include registry-noauth.conf; -} - -server { - listen 5446; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - include registry-basic.conf; -} - -server { - listen 5447; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5448; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_protocols SSLv3; - include registry-noauth.conf; -} - diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd deleted file mode 100644 index 4e55de81..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd +++ /dev/null @@ -1 +0,0 @@ -testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/run.sh b/vendor/github.com/docker/distribution/contrib/docker-integration/run.sh deleted file mode 100755 index 81ca2ad9..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/run.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -# Root directory of Distribution -DISTRIBUTION_ROOT=$(cd ../..; pwd -P) - -volumeMount="" -if [ "$DOCKER_VOLUME" != "" ]; then - volumeMount="-v ${DOCKER_VOLUME}:/var/lib/docker" -fi - -dockerMount="" -if [ "$DOCKER_BINARY" != "" ]; then - dockerMount="-v ${DOCKER_BINARY}:/usr/local/bin/docker" -fi - -# Image containing the integration tests environment. -INTEGRATION_IMAGE=${INTEGRATION_IMAGE:-distribution/docker-integration} - -# Make sure we upgrade the integration environment. -docker pull $INTEGRATION_IMAGE - -# Start the integration tests in a Docker container. 
-docker run --rm -t --privileged $volumeMount $dockerMount \ - -v ${DISTRIBUTION_ROOT}:/go/src/github.com/docker/distribution \ - -e "STORAGE_DRIVER=$DOCKER_GRAPHDRIVER" \ - -e "EXEC_DRIVER=$EXEC_DRIVER" \ - ${INTEGRATION_IMAGE} \ - ./test_runner.sh "$@" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh deleted file mode 100755 index 1917b688..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -# Run the integration tests with multiple versions of the Docker engine - -set -e -set -x - -# Don't use /tmp because this isn't available in boot2docker -tmpdir_template="`pwd`/docker-versions.XXXXX" -tmpdir=`mktemp -d "$tmpdir_template"` -trap "rm -rf $tmpdir" EXIT - -if [ "$1" == "-d" ]; then - # Start docker daemon - - # Drivers to use for Docker engines the tests are going to create. - STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} - EXEC_DRIVER=${EXEC_DRIVER:-native} - - docker --daemon --log-level=panic \ - --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & - DOCKER_PID=$! - - # Wait for it to become reachable. - tries=10 - until docker version &> /dev/null; do - (( tries-- )) - if [ $tries -le 0 ]; then - echo >&2 "error: daemon failed to start" - exit 1 - fi - sleep 1 - done -fi - -# If DOCKER_VOLUME is unset, create a temporary directory to cache containers -# between runs -# Only do this on Linux, because using /var/lib/docker from a host volume seems -# problematic with boot2docker. -if [ "$DOCKER_VOLUME" = "" -a `uname` = "Linux" ]; then - volumes_template="`pwd`/docker-versions.XXXXX" - volume=`mktemp -d "$volumes_template"` - trap "rm -rf $tmpdir $volume" EXIT -else - volume="$DOCKER_VOLUME" -fi - -# Released versions - -versions="1.6.0 1.6.1 1.7.0 1.7.1" - -for v in $versions; do - echo "Extracting Docker $v from dind image" - binpath="$tmpdir/docker-$v/docker" - ID=$(docker create dockerswarm/dind:$v) - docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-$v" - - echo "Running tests with Docker $v" - DOCKER_BINARY="$binpath" DOCKER_VOLUME="$volume" ./run.sh - - # Cleanup. - docker rm -f "$ID" -done - -# Latest experimental version - -echo "Extracting Docker master from dind image" -binpath="$tmpdir/docker-master/docker" -docker pull dockerswarm/dind-master -ID=$(docker create dockerswarm/dind-master) -docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-master" - -echo "Running tests with Docker master" -DOCKER_BINARY="$binpath" DOCKER_VOLUME="$volume" ./run.sh - -# Cleanup. -docker rm -f "$ID" diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/test_runner.sh b/vendor/github.com/docker/distribution/contrib/docker-integration/test_runner.sh deleted file mode 100755 index 2c958c5e..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/test_runner.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -# Load the helpers. -#. helpers.bash - -TESTS=${@:-.} - -# Drivers to use for Docker engines the tests are going to create. 
-STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} -EXEC_DRIVER=${EXEC_DRIVER:-native} - - -function execute() { - >&2 echo "++ $@" - eval "$@" -} - -# Set IP address in /etc/hosts for localregistry -IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') -execute echo "$IP localregistry" >> /etc/hosts - -# Setup certificates -execute sh install_certs.sh localregistry - -# Start the docker engine. -execute docker --daemon --log-level=panic \ - --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & -DOCKER_PID=$! - -# Wait for it to become reachable. -tries=10 -until docker version &> /dev/null; do - (( tries-- )) - if [ $tries -le 0 ]; then - echo >&2 "error: daemon failed to start" - exit 1 - fi - sleep 1 -done - -execute time docker-compose build - -execute docker-compose up -d - -# Run the tests. -execute time bats -p $TESTS - diff --git a/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats b/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats deleted file mode 100644 index 8b7ae287..00000000 --- a/vendor/github.com/docker/distribution/contrib/docker-integration/tls.bats +++ /dev/null @@ -1,102 +0,0 @@ -# Registry host name, should be set to non-localhost address and match -# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d -hostname="localregistry" - -image="hello-world:latest" - -# Login information, should match values in nginx/test.passwd -user="testuser" -password="passpassword" -email="distribution@docker.com" - -function setup() { - docker pull $image -} - -# skip basic auth tests with Docker 1.6, where they don't pass due to -# certificate issues -function basic_auth_version_check() { - run sh -c 'docker version | fgrep -q "Client version: 1.6."' - if [ "$status" -eq 0 ]; then - skip "Basic auth tests don't support 1.6.x" - fi -} - -# has_digest enforces the last output line is "Digest: sha256:..." 
-# the input is the name of the array containing the output lines -function has_digest() { - filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') - [ "$filtered" != "" ] -} - -function login() { - run docker login -u $user -p $password -e $email $1 - [ "$status" -eq 0 ] - # First line is WARNING about credential save - [ "${lines[1]}" = "Login Succeeded" ] -} - -@test "Test valid certificates" { - docker tag -f $image $hostname:5440/$image - run docker push $hostname:5440/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test basic auth" { - basic_auth_version_check - login $hostname:5441 - docker tag -f $image $hostname:5441/$image - run docker push $hostname:5441/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test TLS client auth" { - docker tag -f $image $hostname:5442/$image - run docker push $hostname:5442/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test TLS client with invalid certificate authority fails" { - docker tag -f $image $hostname:5443/$image - run docker push $hostname:5443/$image - [ "$status" -ne 0 ] -} - -@test "Test basic auth with TLS client auth" { - basic_auth_version_check - login $hostname:5444 - docker tag -f $image $hostname:5444/$image - run docker push $hostname:5444/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test unknown certificate authority fails" { - docker tag -f $image $hostname:5445/$image - run docker push $hostname:5445/$image - [ "$status" -ne 0 ] -} - -@test "Test basic auth with unknown certificate authority fails" { - run login $hostname:5446 - [ "$status" -ne 0 ] - docker tag -f $image $hostname:5446/$image - run docker push $hostname:5446/$image - [ "$status" -ne 0 ] -} - -@test "Test TLS client auth to server with unknown certificate authority fails" { - docker tag -f $image $hostname:5447/$image - run docker push $hostname:5447/$image - [ "$status" -ne 0 ] -} - -@test "Test failure to connect to server fails to fallback to SSLv3" { - docker tag -f $image $hostname:5448/$image - run docker push $hostname:5448/$image - [ "$status" -ne 0 ] -} - diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go index 68991685..a0221216 100644 --- a/vendor/github.com/docker/distribution/digest/digest.go +++ b/vendor/github.com/docker/distribution/digest/digest.go @@ -70,15 +70,10 @@ func ParseDigest(s string) (Digest, error) { return d, d.Validate() } -// FromReader returns the most valid digest for the underlying content. +// FromReader returns the most valid digest for the underlying content using +// the canonical digest algorithm. func FromReader(rd io.Reader) (Digest, error) { - digester := Canonical.New() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil + return Canonical.FromReader(rd) } // FromTarArchive produces a tarsum digest from reader rd. diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go index 556dd93a..4f03e189 100644 --- a/vendor/github.com/docker/distribution/digest/digester.go +++ b/vendor/github.com/docker/distribution/digest/digester.go @@ -3,6 +3,7 @@ package digest import ( "crypto" "hash" + "io" ) // Algorithm identifies and implementation of a digester by an identifier. 
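The `digest.FromReader` hunk above replaces an inline hash-and-copy loop with a delegation to the canonical algorithm. A minimal sketch of how a caller sees this API, assuming the vendored `github.com/docker/distribution/digest` package as patched here:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	// FromReader is now sugar for Canonical.FromReader, so the digest is
	// computed with the canonical algorithm (sha256 at the time of this patch).
	d, err := digest.FromReader(strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm(), d)
	// sha256 sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
}
```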
@@ -49,6 +50,22 @@ func (a Algorithm) Available() bool { return h.Available() } +func (a Algorithm) String() string { + return string(a) +} + +// Set implemented to allow use of Algorithm as a command line flag. +func (a *Algorithm) Set(value string) error { + if value == "" { + *a = Canonical + } else { + // just do a type conversion, support is queried with Available. + *a = Algorithm(value) + } + + return nil +} + // New returns a new digester for the specified algorithm. If the algorithm // does not have a digester implementation, nil will be returned. This can be // checked by calling Available before calling New. @@ -69,6 +86,17 @@ func (a Algorithm) Hash() hash.Hash { return algorithms[a].New() } +// FromReader returns the digest of the reader using the algorithm. +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { + digester := a.New() + + if _, err := io.Copy(digester.Hash(), rd); err != nil { + return "", err + } + + return digester.Digest(), nil +} + // TODO(stevvooe): Allow resolution of verifiers using the digest type and // this registration system. diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go index 271d35db..3fac41b4 100644 --- a/vendor/github.com/docker/distribution/digest/set.go +++ b/vendor/github.com/docker/distribution/digest/set.go @@ -4,6 +4,7 @@ import ( "errors" "sort" "strings" + "sync" ) var ( @@ -27,6 +28,7 @@ var ( // the complete set of digests. To mitigate collisions, an // appropriately long short code should be used. type Set struct { + mutex sync.RWMutex entries digestEntries } @@ -63,6 +65,8 @@ func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { // with an empty digest value. If multiple matches are found // ErrDigestAmbiguous will be returned with an empty digest value. func (dst *Set) Lookup(d string) (Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() if len(dst.entries) == 0 { return "", ErrDigestNotFound } @@ -101,13 +105,15 @@ func (dst *Set) Lookup(d string) (Digest, error) { return dst.entries[idx].digest, nil } -// Add adds the given digests to the set. An error will be returned +// Add adds the given digest to the set. An error will be returned // if the given digest is invalid. If the digest already exists in the -// table, this operation will be a no-op. +// set, this operation will be a no-op. func (dst *Set) Add(d Digest) error { if err := d.Validate(); err != nil { return err } + dst.mutex.Lock() + defer dst.mutex.Unlock() entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} searchFunc := func(i int) bool { if dst.entries[i].val == entry.val { @@ -130,12 +136,56 @@ func (dst *Set) Add(d Digest) error { return nil } +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. 
+func (dst *Set) Remove(d Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + // ShortCodeTable returns a map of Digest to unique short codes. The // length represents the minimum value, the maximum length may be the // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. func ShortCodeTable(dst *Set, length int) map[Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() m := make(map[Digest]string, len(dst.entries)) l := length resetIdx := 0 diff --git a/vendor/github.com/docker/distribution/digest/set_test.go b/vendor/github.com/docker/distribution/digest/set_test.go index faeba6d3..0c0f650d 100644 --- a/vendor/github.com/docker/distribution/digest/set_test.go +++ b/vendor/github.com/docker/distribution/digest/set_test.go @@ -125,6 +125,66 @@ func TestAddDuplication(t *testing.T) { } } +func TestRemove(t *testing.T) { + digests, err := createDigests(10) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup(digests[0].String()) + if err != nil { + t.Fatal(err) + } + if dgst != digests[0] { + t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) + } + + if err := dset.Remove(digests[0]); err != nil { + t.Fatal(err) + } + + if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { + t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) + } +} + +func TestAll(t *testing.T) { + digests, err := createDigests(100) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + all := map[Digest]struct{}{} + for _, dgst := range dset.All() { + all[dgst] = struct{}{} + } + + if len(all) != len(digests) { + t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) + } + + for i, dgst := range digests { + if _, ok := all[dgst]; !ok { + t.Fatalf("Missing element at position %d: %s", i, dgst) + } + } + +} + func assertEqualShort(t *testing.T, actual, expected string) { if actual != expected { t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) @@ -219,6 +279,29 @@ func benchLookupNTable(b *testing.B, n int, shortLen int) { } } +func benchRemoveNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := 
&Set{entries: digestEntries(make([]*digestEntry, 0, n))} + b.StopTimer() + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for j := range digests { + if err = dset.Remove(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + func benchShortCodeNTable(b *testing.B, n int, shortLen int) { digests, err := createDigests(n) if err != nil { @@ -249,6 +332,18 @@ func BenchmarkAdd1000(b *testing.B) { benchAddNTable(b, 1000) } +func BenchmarkRemove10(b *testing.B) { + benchRemoveNTable(b, 10) +} + +func BenchmarkRemove100(b *testing.B) { + benchRemoveNTable(b, 100) +} + +func BenchmarkRemove1000(b *testing.B) { + benchRemoveNTable(b, 1000) +} + func BenchmarkLookup10(b *testing.B) { benchLookupNTable(b, 10, 12) } diff --git a/vendor/github.com/docker/distribution/digest/tarsum.go b/vendor/github.com/docker/distribution/digest/tarsum.go index 702d7dc3..9effeb2e 100644 --- a/vendor/github.com/docker/distribution/digest/tarsum.go +++ b/vendor/github.com/docker/distribution/digest/tarsum.go @@ -6,7 +6,7 @@ import ( "regexp" ) -// TarSumRegexp defines a regular expression to match tarsum identifiers. +// TarsumRegexp defines a regular expression to match tarsum identifiers. var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+") // TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with diff --git a/vendor/github.com/docker/distribution/docs/Dockerfile b/vendor/github.com/docker/distribution/docs/Dockerfile deleted file mode 100644 index 0ed4e526..00000000 --- a/vendor/github.com/docker/distribution/docs/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM docs/base:latest -MAINTAINER Mary Anthony (@moxiegirl) - -# To get the git info for this repo -COPY . /src - -COPY . 
/docs/content/registry/ - -# Sed to process GitHub Markdown -# 1-2 Remove comment code from metadata block -# 3 Change ](/word to ](/project/ in links -# 4 Change ](word.md) to ](/project/word) -# 5 Remove .md extension from link text -# 6 Change ](./ to ](/project/word) -# 7 Change ](../../ to ](/project/ -# 8 Change ](../ to ](/project/ -# -RUN find /docs/content/registry -type f -name "*.md" -exec sed -i.old \ - -e '/^/g' \ - -e '/^/g' \ - -e 's/\(\]\)\([(]\)\(\/\)/\1\2\/registry\//g' \ - -e 's/\(\][(]\)\([A-Za-z0-9]*\)\(\.md\)/\1\/registry\/\2/g' \ - -e 's/\([(]\)\(.*\)\(\.md\)/\1\2/g' \ - -e 's/\(\][(]\)\(\.\/\)/\1\/registry\//g' \ - -e 's/\(\][(]\)\(\.\.\/\.\.\/\)/\1\/registry\//g' \ - -e 's/\(\][(]\)\(\.\.\/\)/\1\/registry\//g' {} \; \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/docs/Makefile b/vendor/github.com/docker/distribution/docs/Makefile deleted file mode 100644 index 021e8f6e..00000000 --- a/vendor/github.com/docker/distribution/docs/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate - -# env vars passed through directly to Docker's build scripts -# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily -# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these -DOCKER_ENVS := \ - -e BUILDFLAGS \ - -e DOCKER_CLIENTONLY \ - -e DOCKER_EXECDRIVER \ - -e DOCKER_GRAPHDRIVER \ - -e TESTDIRS \ - -e TESTFLAGS \ - -e TIMEOUT -# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds - -# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) -DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) - -# to allow `make DOCSPORT=9000 docs` -DOCSPORT := 8000 - -# Get the IP ADDRESS -DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") -HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") -HUGO_BIND_IP=0.0.0.0 - -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) - - -DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE - -# for some docs workarounds (see below in "docs-build" target) -GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) - -default: docs - -docs: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) - -docs-draft: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) - - -docs-shell: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash - - -docs-build: -# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files -# echo "$(GIT_BRANCH)" > GIT_BRANCH -# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET -# echo "$(GITCOMMIT)" > GITCOMMIT - docker build -t "$(DOCKER_DOCS_IMAGE)" . 
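The `digest.Set` hunks above add internal locking plus `Remove` and `All` alongside the existing `Add` and `Lookup`. For orientation, a small usage sketch against the patched vendored package (the names are exactly those in the hunks; the sketch itself is not part of the patch):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	d1, err := digest.FromReader(strings.NewReader("layer one"))
	if err != nil {
		panic(err)
	}
	d2, err := digest.FromReader(strings.NewReader("layer two"))
	if err != nil {
		panic(err)
	}

	// Set is now guarded by a sync.RWMutex internally, so Add/Remove/All
	// may be used concurrently.
	dset := digest.NewSet()
	for _, d := range []digest.Digest{d1, d2} {
		if err := dset.Add(d); err != nil {
			panic(err)
		}
	}
	fmt.Println(len(dset.All())) // 2

	if err := dset.Remove(d1); err != nil {
		panic(err)
	}
	if _, err := dset.Lookup(d1.String()); err == digest.ErrDigestNotFound {
		fmt.Println("removed:", d1)
	}
}
```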
diff --git a/vendor/github.com/docker/distribution/docs/architecture.md b/vendor/github.com/docker/distribution/docs/architecture.md deleted file mode 100644 index 558a1199..00000000 --- a/vendor/github.com/docker/distribution/docs/architecture.md +++ /dev/null @@ -1,54 +0,0 @@ - - -# Architecture - -## Design -**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios. - -### Eventual Consistency - -> **NOTE:** This section belongs somewhere, perhaps in a design document. We -> are leaving this here so the information is not lost. - -Running the registry on eventually consistent backends has been part of the -design from the beginning. This section covers some of the approaches to -dealing with this reality. - -There are a few classes of issues that we need to worry about when -implementing something on top of the storage drivers: - -1. Read-After-Write consistency (see this [article on - s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)). -2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict). - -In reality, the registry must worry about these kinds of errors when doing the -following: - -1. Accepting data into a temporary upload file may not have latest data block - yet (read-after-write). -2. Moving uploaded data into its blob location (write-write race). -3. Modifying the "current" manifest for given tag (write-write race). -4. A whole slew of operations around deletes (read-after-write, delete-write - races, garbage collection, etc.). - -The backend path layout employs a few techniques to avoid these problems: - -1. Large writes are done to private upload directories. This alleviates most - of the corruption potential under multiple writers by avoiding multiple - writers. -2. Constraints in storage driver implementations, such as support for writing - after the end of a file to extend it. -3. Digest verification to avoid data corruption. -4. Manifest files are stored by digest and cannot change. -5. All other non-content files (links, hashes, etc.) are written as an atomic - unit. Anything that requires additions and deletions is broken out into - separate "files". Last writer still wins. - -Unfortunately, one must play this game when trying to build something like -this on top of eventually consistent storage systems. If we run into serious -problems, we can wrap the storagedrivers in a shared consistency layer but -that would increase complexity and hinder registry cluster performance. diff --git a/vendor/github.com/docker/distribution/docs/authentication.md b/vendor/github.com/docker/distribution/docs/authentication.md deleted file mode 100644 index 507c9a66..00000000 --- a/vendor/github.com/docker/distribution/docs/authentication.md +++ /dev/null @@ -1,185 +0,0 @@ - - -# Authentication - -While running an unrestricted registry is certainly ok for development, secured local networks, or test setups, you should probably implement access restriction if you plan on making your registry available to a wider audience or through public internet. - -The Registry supports two different authentication methods to get your there: - - * direct authentication, through the use of a proxy - * delegated authentication, redirecting to a trusted token server - -The first method is recommended for most people as the most straight-forward solution. 
- -The second method requires significantly more investment, and only make sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems. - -## Direct authentication through a proxy - -With this method, you implement basic authentication in a reverse proxy that sits in front of your registry. - -Since the Docker engine uses basic authentication to negotiate access to the Registry, securing communication between docker engines and your proxy is absolutely paramount. - -While this model gives you the ability to use whatever authentication backend you want through a secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. - -Below is a simple example of secured basic authentication (using TLS), using nginx as a proxy. - -### Requirements - -You should have followed entirely the basic [deployment guide](deploying.md). - -If you have not, please take the time to do so. - -At this point, it's assumed that: - - * you understand Docker security requirements, and how to configure your docker engines properly - * you have installed Docker Compose - * you have a `domain.crt` and `domain.key` files, for the CN `myregistrydomain.com` (or whatever domain name you want to use) - * these files are located inside the current directory, and there is nothing else in that directory - * it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates - * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm registry`) - -### Setting things up - -Read again the requirements. - -Ready? - -Run the following: - -``` -mkdir auth -mkdir data - -# This is the main nginx configuration you will use -cat < auth/registry.conf -upstream docker-registry { - server registry:5000; -} - -server { - listen 443 ssl; - server_name myregistrydomain.com; - - # SSL - ssl_certificate /etc/nginx/conf.d/domain.crt; - ssl_certificate_key /etc/nginx/conf.d/domain.key; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) { - return 404; - } - - # To add basic authentication to v2 use auth_basic setting plus add_header - auth_basic "registry.localhost"; - auth_basic_user_file /etc/nginx/conf.d/registry.password; - add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; - - proxy_pass http://docker-registry; - proxy_set_header Host \$http_host; # required for docker client's sake - proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP - proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto \$scheme; - proxy_read_timeout 900; - } -} -EOF - -# Now, create a password file for "testuser" and "testpassword" -echo 'testuser:$2y$05$.nIfPAEgpWCh.rpts/XHX.UOfCRNtvMmYjh6sY/AZBmeg/dQyN62q' > auth/registry.password - -# Alternatively you could have achieved the same thing with htpasswd -# htpasswd -Bbc auth/registry.password testuser testpassword - -# Copy over your certificate files -cp domain.crt auth 
-cp domain.key auth - -# Now create your compose file - -cat < docker-compose.yml -nginx: - image: "nginx:1.9" - ports: - - 5043:443 - links: - - registry:registry - volumes: - - `pwd`/auth/:/etc/nginx/conf.d - -registry: - image: registry:2 - ports: - - 127.0.0.1:5000:5000 - environment: - REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /data - volumes: - - `pwd`/data:/data -EOF -``` - -### Starting and stopping - -That's it. You can now: - - * `docker-compose up -d` to start your registry - * `docker login myregistrydomain.com:5043` (using `testuser` and `testpassword`) - * `docker tag ubuntu myregistrydomain.com:5043/toto` - * `docker push myregistrydomain.com:5043/toto` - -### Docker still complains about the certificate? - -That's certainly because you are using a self-signed certificate, despite the warnings. - -If you really insist on using these, you have to trust it at the OS level. - -Usually, on Ubuntu this is done with: -``` -cp auth/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt -update-ca-certificates -``` - -... and on RedHat with: -``` -cp auth/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt -update-ca-trust -``` - -Now: - - * `service docker stop && service docker start` (or any other way you use to restart docker) - * `docker-compose up -d` to bring your registry up - -## Token-based delegated authentication - -This is **advanced**. - -You will find [background information here](./spec/auth/token.md), [configuration information here](configuration.md#auth). - -Beware that you will have to implement your own authentication service for this to work (though there exist third-party open-source implementations). - -# Manual Set-up - -If you'd like to manually configure your HTTP server, here are a few requirements that are absolutely necessary for the docker client to be able to interface with it: - -- Each response needs to have the header "Docker-Distribution-Api-Version registry/2.0" set, even (especially) if there is a 401 or 404 error response. Make sure using cURL that this header is provided. Note: If you're using Nginx, this functionality is only available since 1.7.5 using the "always" add_header directive, or when compiling with the "more_set_headers" module. - -- A large enough maximum for client body size, preferably unlimited. Because images can be pretty big, the very low default maximum size of most HTTP servers won't be sufficient to be able to upload the files. - -- Support for chunked transfer encoding. diff --git a/vendor/github.com/docker/distribution/docs/building.md b/vendor/github.com/docker/distribution/docs/building.md deleted file mode 100644 index b54322c8..00000000 --- a/vendor/github.com/docker/distribution/docs/building.md +++ /dev/null @@ -1,157 +0,0 @@ - - -# Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - -```sh -go get github.com/docker/distribution/cmd/registry -``` - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - -```sh -mkdir -p /var/lib/registry -``` - -... 
or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - -``` -$ $GOPATH/bin/registry -version -$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown -``` - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - -``` -$ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config-dev.yml -INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown -INFO[0000] debug server listening localhost:5001 -``` - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - -``` -go get github.com/tools/godep github.com/golang/lint/golint -``` - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - -``` -$ GOPATH=`godep path`:$GOPATH make -+ clean -+ fmt -+ vet -+ lint -+ build -github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar -github.com/Sirupsen/logrus -github.com/docker/libtrust -... -github.com/yvasiyarov/gorelic -github.com/docker/distribution/registry/handlers -github.com/docker/distribution/cmd/registry -+ test -... -ok github.com/docker/distribution/digest 7.875s -ok github.com/docker/distribution/manifest 0.028s -ok github.com/docker/distribution/notifications 17.322s -? github.com/docker/distribution/registry [no test files] -ok github.com/docker/distribution/registry/api/v2 0.101s -? github.com/docker/distribution/registry/auth [no test files] -ok github.com/docker/distribution/registry/auth/silly 0.011s -... -+ /Users/sday/go/src/github.com/docker/distribution/bin/registry -+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template -+ /Users/sday/go/src/github.com/docker/distribution/bin/dist -+ binaries -``` - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - -```sh -$ ./bin/registry -version -./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m -``` - -### Developing - -The above approaches are helpful for small experimentation. If more complex -tasks are at hand, it is recommended to employ the full power of `godep`. 
- -The Makefile is designed to have its `GOPATH` defined externally. This allows -one to experiment with various development environment setups. This is -primarily useful when testing upstream bugfixes, by modifying local code. This -can be demonstrated using `godep` to migrate the `GOPATH` to use the specified -dependencies. The `GOPATH` can be migrated to the current package versions -declared in `Godeps` with the following command: - -```sh -godep restore -``` - -> **WARNING:** This command will checkout versions of the code specified in -> Godeps/Godeps.json, modifying the contents of `GOPATH`. If this is -> undesired, it is recommended to create a workspace devoted to work on the -> _Distribution_ project. - -With a successful run of the above command, one can now use `make` without -specifying the `GOPATH`: - -```sh -$ make -``` - -If that is successful, standard `go` commands, such as `go test` should work, -per package, without issue. - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. - -To enable the [Ceph RADOS storage driver](storage-drivers/rados.md) -(librados-dev and librbd-dev will be required to build the bindings): - -```sh -export DOCKER_BUILDTAGS='include_rados' -``` diff --git a/vendor/github.com/docker/distribution/docs/configuration.md b/vendor/github.com/docker/distribution/docs/configuration.md deleted file mode 100644 index f2f58a4d..00000000 --- a/vendor/github.com/docker/distribution/docs/configuration.md +++ /dev/null @@ -1,1630 +0,0 @@ - - - - -# Registry Configuration Reference - -The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. - -## Override configuration options - -In a typical setup where you run your Registry from the official image, you can specify any configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. - -To override a configuration option, create an environment variable named -`REGISTRY_variable` where *`variable`* is the name of the configuration option -and the `_` (underscore) represents indention levels. For example, you can -configure the `rootdirectory` of the `filesystem` storage backend: - - storage: - filesystem: - rootdirectory: /var/lib/registry - -To override this value, set an environment variable like this: - - REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere - -This variable overrides the `/var/lib/registry` value to the `/somewhere` -directory. - ->**Note**: If an environment variable changes a map value into a string, such ->as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then ->all sub-fields will be erased. As such, specifying the storage type in the ->environment will remove all parameters related to the old storage ->configuration. - - - -## List of configuration options - -This section lists all the registry configuration options. Some options in -the list are mutually exclusive. So, make sure to read the detailed reference -information about each option that appears later in this page. 
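The override rule described above (strip the `REGISTRY_` prefix, then treat each `_` as one level of nesting) can be made concrete with a toy sketch. This only illustrates the naming convention; it is not the registry's actual configuration loader, and it ignores edge cases such as keys that themselves contain underscores:

```go
package main

import (
	"fmt"
	"strings"
)

// applyOverride is a toy illustration of the naming rule: strip the
// REGISTRY_ prefix, treat each remaining underscore as one level of
// nesting, and lower-case the keys.
func applyOverride(cfg map[string]interface{}, env, value string) {
	keys := strings.Split(strings.TrimPrefix(env, "REGISTRY_"), "_")
	node := cfg
	for _, k := range keys[:len(keys)-1] {
		k = strings.ToLower(k)
		child, ok := node[k].(map[string]interface{})
		if !ok {
			child = map[string]interface{}{}
			node[k] = child
		}
		node = child
	}
	node[strings.ToLower(keys[len(keys)-1])] = value
}

func main() {
	cfg := map[string]interface{}{}
	applyOverride(cfg, "REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/somewhere")
	fmt.Println(cfg)
	// map[storage:map[filesystem:map[rootdirectory:/somewhere]]]
}
```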
- - version: 0.1 - log: - level: debug - formatter: text - fields: - service: registry - environment: staging - hooks: - - type: mail - disabled: true - levels: - - panic - options: - smtp: - addr: mail.example.com:25 - username: mailuser - password: password - insecure: true - from: sender@example.com - to: - - errors@example.com - loglevel: debug # deprecated: use "log" - storage: - filesystem: - rootdirectory: /var/lib/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - rados: - poolname: radospool - username: radosuser - chunksize: 4194304 - swift: - username: username - password: password - authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - redirect: - disable: false - cache: - blobdescriptor: redis - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle - htpasswd: - realm: basic-realm - path: /path/to/htpasswd - middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 - reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true - http: - addr: localhost:5000 - prefix: /my/nested/registry/ - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 - notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 - redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - -In some instances a configuration option is **optional** but it contains child -options marked as **required**. This indicates that you can omit the parent with -all its children. However, if the parent is included, you must also include all -the children marked **required**. - - - -## version - - version: 0.1 - -The `version` option is **required**. It specifies the configuration's version. -It is expected to remain a top-level field, to allow for a consistent version -check before parsing the remainder of the configuration file. - -## log - -The `log` subsection configures the behavior of the logging system. The logging -system outputs everything to stdout. You can adjust the granularity and format -with this configuration section. 
-
-    log:
-      level: debug
-      formatter: text
-      fields:
-        service: registry
-        environment: staging
-
-| Parameter | Required | Description |
-| --------- | -------- | ----------- |
-| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. |
-| `formatter` | no | Selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json` and `logstash`. The default is `text`. |
-| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying the source of log messages after they have been mixed into other systems. |
- -## hooks - - hooks: - - type: mail - levels: - - panic - options: - smtp: - addr: smtp.sendhost.com:25 - username: sendername - password: password - insecure: true - from: name@sendhost.com - to: - - name@receivehost.com - -The `hooks` subsection configures the logging hooks' behavior. This subsection -includes a sequence handler which you can use for sending mail, for example. -Refer to `loglevel` to configure the level of messages printed. - -## loglevel - -> **DEPRECATED:** Please use [log](#log) instead. - - loglevel: debug - -Permitted values are `error`, `warn`, `info` and `debug`. The default is -`info`. - -## storage - - storage: - filesystem: - rootdirectory: /var/lib/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - rados: - poolname: radospool - username: radosuser - chunksize: 4194304 - swift: - username: username - password: password - authurl: https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - cache: - blobdescriptor: inmemory - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - redirect: - disable: false - -The storage option is **required** and defines which storage backend is in use. -You must configure one backend; if you configure more, the registry returns an error. - -If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use a S3, or Azure, backing data-store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Window's `MAX_PATH` limits. Failure to do so can result in the following error message: - - mkdir /XXX protocol error and your registry will not function properly. - -### cache - -Use the `cache` subsection to enable caching of data accessed in the storage -backend. Currently, the only available cache provides fast access to layer -metadata. This, if configured, uses the `blobdescriptor` field. - -You can set `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses -a Redis pool to cache layer metadata. The `inmemory` value uses an in memory -map. - ->**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these ->are equivalent, `layerinfo` has been deprecated, in favor or ->`blobdescriptor`. - -### redirect - -The `redirect` subsection provides configuration for managing redirects from -content backends. For backends that support it, redirecting is enabled by -default. Certain deployment scenarios may prefer to route all data through the -Registry, rather than redirecting to the backend. This may be more efficient -when using a backend that is not colocated or when a registry instance is -doing aggressive caching. - -Redirects can be disabled by adding a single flag `disable`, set to `true` -under the `redirect` section: - - redirect: - disable: true - -### filesystem - -The `filesystem` storage backend uses the local disk to store registry files. 
It is ideal for development and may be appropriate for some small-scale production applications.
-
-This backend has a single, required `rootdirectory` parameter. The parameter specifies the absolute path to a directory. The registry stores all its data here so make sure there is adequate space available.
-
-### azure
-
-This storage backend uses Microsoft's Azure Blob Storage.
-
-| Parameter | Required | Description |
-| --------- | -------- | ----------- |
-| `accountname` | yes | Azure account name. |
-| `accountkey` | yes | Azure account key. |
-| `container` | yes | Name of the Azure container into which to store data. |
-| `realm` | no | Domain name suffix for the Storage Service API endpoint. By default, this is `core.windows.net`. |
-
-### rados
-
-This storage backend uses [Ceph Object Storage](http://ceph.com/docs/master/rados/).
-
-| Parameter | Required | Description |
-| --------- | -------- | ----------- |
-| `poolname` | yes | Ceph pool name. |
-| `username` | no | Ceph cluster user to connect as (i.e. admin, not client.admin). |
-| `chunksize` | no | Size of the written RADOS objects. Default value is 4MB (4194304). |
-
-### S3
-
-This storage backend uses Amazon's Simple Storage Service (S3).
-
-| Parameter | Required | Description |
-| --------- | -------- | ----------- |
-| `accesskey` | yes | Your AWS Access Key. |
-| `secretkey` | yes | Your AWS Secret Key. |
-| `region` | yes | The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS based bucket routing. |
-| `bucket` | yes | The bucket name in which you want to store the registry's data. |
-| `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
-| `secure` | no | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is `false`. |
-| `v4auth` | no | Indicates whether the registry uses Version 4 of AWS's authentication. Generally, you should set this to `true`. By default, this is `false`. |
-| `chunksize` | no | The S3 API requires multipart upload chunks to be at least 5MB. This value should be a number that is larger than 5*1024*1024. |
-| `rootdirectory` | no | This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. |
- -### Maintenance - -Currently the registry can perform one maintenance function: upload purging. This and future -maintenance functions which are related to storage can be configured under the maintenance section. - -### Upload Purging - -Upload purging is a background process that periodically removes orphaned files from the upload -directories of the registry. Upload purging is enabled by default. To - configure upload directory purging, the following parameters -must be set. - - -| Parameter | Required | Description - --------- | -------- | ----------- -`enabled` | yes | Set to true to enable upload purging. Default=true. | -`age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week) -`interval` | yes | The interval between upload directory purging. Default=24h. -`dryrun` | yes | dryrun can be set to true to obtain a summary of what directories will be deleted. Default=false. - -Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week). - -### Openstack Swift - -This storage backend uses Openstack Swift object storage. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ParameterRequiredDescription
| `authurl` | yes | URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth |
| `username` | yes | Your Openstack user name. |
| `password` | yes | Your Openstack password. |
| `region` | no | The Openstack region in which your container exists. |
| `container` | yes | The container name in which you want to store the registry's data. |
| `tenant` | no | Your Openstack tenant name. |
| `tenantid` | no | Your Openstack tenant id. |
| `domain` | no | Your Openstack domain name for Identity v3 API. |
| `domainid` | no | Your Openstack domain id for Identity v3 API. |
| `insecureskipverify` | no | true to skip TLS verification, false by default. |
| `chunksize` | no | Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). |
| `rootdirectory` | no | This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. |
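
As with the other drivers, a minimal Swift stanza might look like the following sketch; the credentials and container name are placeholders:

    storage:
      swift:
        authurl: https://storage.myprovider.com/v2.0
        username: myuser
        password: mypassword
        container: registry
        rootdirectory: /swift/object/name/prefix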

## auth

    auth:
      silly:
        realm: silly-realm
        service: silly-service
      token:
        realm: token-realm
        service: token-service
        issuer: registry-token-issuer
        rootcertbundle: /root/certs/bundle
      htpasswd:
        realm: basic-realm
        path: /path/to/htpasswd

The `auth` option is **optional**. There are currently three possible auth providers: `silly`, `token`, and `htpasswd`. You can configure only one `auth` provider.

### silly

The `silly` auth is only for development purposes. It simply checks for the existence of the `Authorization` header in the HTTP request. It has no regard for the header's value. If the header does not exist, the `silly` auth responds with a challenge response, echoing back the realm, service, and scope for which access was denied.

The following values are used to configure the response:

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |

### token

Token-based authentication allows the authentication system to be decoupled from the registry. It is a well-established authentication paradigm with a high degree of security.

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. |
| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. |

For more information about Token-based authentication configuration, see the [specification](spec/auth/token.md).

### htpasswd

The _htpasswd_ authentication backend allows one to configure basic auth using an
[Apache HTPasswd File](https://httpd.apache.org/docs/2.4/programs/htpasswd.html).
Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are
supported. Entries with other hash types will be ignored. The htpasswd file is
loaded once, at startup. If the file is invalid, the registry will display an
error and will not start.

> __WARNING:__ This authentication scheme should only be used with TLS
> configured, since basic authentication sends passwords as part of the HTTP
> header.

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `realm` | yes | The realm in which the registry server authenticates. |
| `path` | yes | Path to htpasswd file to load at startup. |
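
Since only bcrypt entries are honored, one convenient way to produce a compatible file is Apache's `htpasswd` tool with its bcrypt flag; a sketch, with user name, password, and output path as placeholders:

    $ htpasswd -Bbn testuser testpassword > /path/to/htpasswd

`-B` forces bcrypt, `-b` takes the password on the command line, and `-n` prints the entry to stdout instead of updating a file in place.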

## middleware

The `middleware` option is **optional**. Use this option to inject middleware at
named hook points. All middlewares must implement the same interface as the
object they're wrapping. This means a registry middleware must implement the
`distribution.Namespace` interface, repository middleware must implement
`distribution.Repository`, and storage middleware must implement
`driver.StorageDriver`.

Currently only one middleware, `cloudfront`, a storage middleware, is supported
in the registry implementation.

    middleware:
      registry:
        - name: ARegistryMiddleware
          options:
            foo: bar
      repository:
        - name: ARepositoryMiddleware
          options:
            foo: bar
      storage:
        - name: cloudfront
          options:
            baseurl: https://my.cloudfronted.domain.com/
            privatekey: /path/to/pem
            keypairid: cloudfrontkeypairid
            duration: 3000

Each middleware entry has `name` and `options` entries. The `name` must
correspond to the name under which the middleware registers itself. The
`options` field is a map that details custom configuration required to
initialize the middleware. It is treated as a `map[string]interface{}`. As such,
it supports any interesting structures desired, leaving it up to the middleware
initialization function to best determine how to handle the specific
interpretation of the options.

### cloudfront

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `baseurl` | yes | `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
| `privatekey` | yes | Private Key for Cloudfront provided by AWS. |
| `keypairid` | yes | Key pair ID provided by AWS. |
| `duration` | no | Duration for which a signed URL should be valid. |

## reporting

    reporting:
      bugsnag:
        apikey: bugsnagapikey
        releasestage: bugsnagreleasestage
        endpoint: bugsnagendpoint
      newrelic:
        licensekey: newreliclicensekey
        name: newrelicname
        verbose: true

The `reporting` option is **optional** and configures error and metrics
reporting tools. At the moment only two services are supported: [New
Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com). A valid
configuration may contain both.

### bugsnag

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `apikey` | yes | API Key provided by Bugsnag. |
| `releasestage` | no | Tracks where the registry is deployed, for example, `production`, `staging`, or `development`. |
| `endpoint` | no | Specify the enterprise Bugsnag endpoint. |

### newrelic

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `licensekey` | yes | License key provided by New Relic. |
| `name` | no | New Relic application name. |
| `verbose` | no | Enable New Relic debugging output on stdout. |

## http

    http:
      addr: localhost:5000
      net: tcp
      prefix: /my/nested/registry/
      secret: asecretforlocaldevelopment
      tls:
        certificate: /path/to/x509/public
        key: /path/to/x509/private
        clientcas:
          - /path/to/ca.pem
          - /path/to/another/ca.pem
      debug:
        addr: localhost:5001

The `http` option details the configuration for the HTTP server that hosts the registry.

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `addr` | yes | The address for which the server should accept connections. The form depends on a network type (see the `net` option): `HOST:PORT` for tcp and `FILE` for a unix socket. |
| `net` | no | The network which is used to create a listening socket. Known networks are `unix` and `tcp`. The default empty value means `tcp`. |
| `prefix` | no | If the server does not run at the root path use this value to specify the prefix. The root path is the section before `v2`. It should have both preceding and trailing slashes, for example `/path/`. |
| `secret` | yes | A random piece of data. This is used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. This configuration parameter may be omitted, in which case the registry will automatically generate a secret at launch. **WARNING:** If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries. |
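
Like other options, this value can be supplied through an environment variable. Assuming the `REGISTRY_`-prefixed variable mapping used in the deployment guide, a sketch:

    $ docker run -d -p 5000:5000 \
        -e REGISTRY_HTTP_SECRET=asecretforlocaldevelopment \
        --restart=always --name registry registry:2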

### tls

The `tls` struct within `http` is **optional**. Use this to configure TLS
for the server. If you already have a server such as Nginx or Apache running on
the same host as the registry, you may prefer to configure TLS termination there
and proxy connections to the registry server.

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `certificate` | yes | Absolute path to x509 cert file. |
| `key` | yes | Absolute path to x509 private key file. |
| `clientcas` | no | An array of absolute paths to x509 CA files. |

### debug

The `debug` option is **optional**. Use it to configure a debug server that
can be helpful in diagnosing problems. The debug endpoint can be used for
monitoring registry metrics and health, as well as profiling. Sensitive
information may be available via the debug endpoint. Please be certain that
access to the debug endpoint is locked down in a production environment.

The `debug` section takes a single, required `addr` parameter. This parameter
specifies the `HOST:PORT` on which the debug server should accept connections.

## notifications

    notifications:
      endpoints:
        - name: alistener
          disabled: false
          url: https://my.listener.com/event
          headers:
          timeout: 500
          threshold: 5
          backoff: 1000

The notifications option is **optional** and currently may contain a single
option, `endpoints`.

### endpoints

Endpoints is a list of named services (URLs) that can accept event notifications.

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `name` | yes | A human readable name for the service. |
| `disabled` | no | A boolean to enable/disable notifications for a service. |
| `url` | yes | The URL to which events should be published. |
| `headers` | yes | Static headers to add to each request. |
| `timeout` | yes | An HTTP timeout value. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes), and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `threshold` | yes | An integer specifying how long to wait before backing off a failure. |
| `backoff` | yes | How long the system backs off before retrying. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes), and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
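
To make the duration format concrete, a hypothetical endpoint entry using explicit unit suffixes might read:

    notifications:
      endpoints:
        - name: alistener
          url: https://my.listener.com/event
          timeout: 500ms   # 500 milliseconds; a bare "500" would mean 500 nanoseconds
          threshold: 5
          backoff: 1s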

## redis

    redis:
      addr: localhost:6379
      password: asecret
      db: 0
      dialtimeout: 10ms
      readtimeout: 10ms
      writetimeout: 10ms
      pool:
        maxidle: 16
        maxactive: 64
        idletimeout: 300s

Declare parameters for constructing the redis connections. Registry instances
may use the Redis instance for several applications. The current purpose is
caching information about immutable blobs. Most of the options below control
how the registry connects to redis. You can control the pool's behavior
with the [pool](#pool) subsection.

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `addr` | yes | Address (host and port) of redis instance. |
| `password` | no | A password used to authenticate to the redis instance. |
| `db` | no | Selects the db for each connection. |
| `dialtimeout` | no | Timeout for connecting to a redis instance. |
| `readtimeout` | no | Timeout for reading from redis connections. |
| `writetimeout` | no | Timeout for writing to redis connections. |

### pool

    pool:
      maxidle: 16
      maxactive: 64
      idletimeout: 300s

Configure the behavior of the Redis connection pool.

| Parameter | Required | Description |
| --------- | -------- | ----------- |
| `maxidle` | no | Sets the maximum number of idle connections. |
| `maxactive` | no | Sets the maximum number of connections that should be opened before blocking a connection request. |
| `idletimeout` | no | Sets the amount of time to wait before closing inactive connections. |

## Example: Development configuration

The following is a simple example you can use for local development:

    version: 0.1
    log:
      level: debug
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: localhost:5000
      secret: asecretforlocaldevelopment
      debug:
        addr: localhost:5001

The above configures the registry instance to run on port `5000`, binding to
`localhost`, with the `debug` server enabled. Registry data storage is in the
`/var/lib/registry` directory. Logging is in `debug` mode, which is the most
verbose.

A similar simple configuration is available at
[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml).
Both are generally useful for local development.

## Example: Middleware configuration

This example illustrates how to configure storage middleware in a registry.
Middleware allows the registry to serve layers via a content delivery network
(CDN). This is useful for reducing requests to the storage layer.

Currently, the registry supports [Amazon
Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in
conjunction with the S3 storage driver.

| Parameter | Description |
| --------- | ----------- |
| `name` | The storage middleware name. Currently `cloudfront` is an accepted value. |
| `disabled` | Set to `false` to easily disable the middleware. |
| `options` | A set of key/value options to configure the middleware: `baseurl` (the Cloudfront base URL), `privatekey` (the location of your AWS private key on the filesystem), `keypairid` (the ID of your Cloudfront keypair), and `duration` (the duration in minutes for which the URL is valid; default is 20). |

The following example illustrates these values:

    middleware:
      storage:
        - name: cloudfront
          disabled: false
          options:
            baseurl: http://d111111abcdef8.cloudfront.net
            privatekey: /path/to/asecret.pem
            keypairid: asecret
            duration: 60

>**Note**: Cloudfront keys exist separately from other AWS keys. See
>[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
>for more information.
diff --git a/vendor/github.com/docker/distribution/docs/deploying.md b/vendor/github.com/docker/distribution/docs/deploying.md
deleted file mode 100644
index ef44641a..00000000
--- a/vendor/github.com/docker/distribution/docs/deploying.md
+++ /dev/null
@@ -1,177 +0,0 @@

# Deploying a registry server

You obviously need to [install Docker](https://docs.docker.com/installation/) (remember you need **Docker version 1.6.0 or newer**).

## Getting started

Start your registry:

    $ docker run -d -p 5000:5000 \
        --restart=always --name registry registry:2

That's it.

You can now tag an image and push it:

    $ docker pull ubuntu && docker tag ubuntu localhost:5000/batman/ubuntu
    $ docker push localhost:5000/batman/ubuntu

Then pull it back:

    $ docker pull localhost:5000/batman/ubuntu

## Where is my data?

By default, your registry stores its data on the local filesystem, inside the container.

In a production environment, it's highly recommended to use [another storage backend](storagedrivers.md), by [configuring it](configuration.md).

If you want to stick with the local posix filesystem, you should store your data outside of the container.

This is achieved by mounting a volume into the container:

    $ docker run -d -p 5000:5000 \
        -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/var/lib/registry \
        -v /myregistrydata:/var/lib/registry \
        --restart=always --name registry registry:2

## Making your Registry available

Now that your registry works on `localhost`, you probably want to make it available to other hosts as well.

Let's assume your registry is accessible via the domain name `myregistrydomain.com` (still on port `5000`).

If you try to `docker pull myregistrydomain.com:5000/batman/ubuntu`, you will see the following error message:

```
FATA[0000] Error response from daemon: v1 ping attempt failed with error:
Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527.
If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add
`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments.
In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag;
simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt
```

When trying to reach a non-`localhost` registry, Docker requires that you secure it using HTTPS, or make it explicit that you want to run an insecure registry.

You basically have three different options to comply with that security requirement.

### 1. buy an SSL certificate for your domain

This is the (highly) recommended solution.

You can buy a certificate for as little as $10 a year (some registrars even offer certificates for free), and this will save you a lot of trouble.

Assuming you now have a `domain.crt` and `domain.key` inside a directory named `certs`:

```
# Stop your registry
docker stop registry && docker rm registry

# Start your registry with TLS enabled
docker run -d -p 5000:5000 \
  -v `pwd`/certs:/certs \
  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
  -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
  --restart=always --name registry \
  registry:2
```

A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*. You can do this using the `cat` command:

```
$ cat server.crt intermediate-certificates.pem > server.with-intermediate.crt
```

You then configure the registry to use your certificate bundle by providing the `REGISTRY_HTTP_TLS_CERTIFICATE` environment variable.

**Pros:**

 - best solution
 - works without further ado (assuming you bought your certificate from a CA that is trusted by your operating system)

**Cons:**

 - ?

### 2. instruct docker to trust your registry as insecure

This basically tells Docker to entirely disregard security for your registry.

1. edit the file `/etc/default/docker` so that there is a line that reads: `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` (or add that to existing `DOCKER_OPTS`)
2. restart your Docker daemon: on Ubuntu, this is usually `service docker stop && service docker start`

**Pros:**

 - easy to configure

**Cons:**

 - very insecure
 - you have to configure every docker daemon that wants to access your registry

### 3. use a self-signed certificate and configure docker to trust it

Alternatively, you can generate your own certificate:

```
mkdir -p certs && openssl req \
  -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \
  -x509 -days 365 -out certs/domain.crt
```

Be sure to use the name `myregistrydomain.com` as a CN.

Now go to solution 1 above: stop and restart your registry with TLS enabled.

Then you have to instruct every docker daemon to trust that certificate. This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt` (don't forget to restart docker after doing so).

**Pros:**

 - more secure than solution 2

**Cons:**

 - you have to configure every docker daemon that wants to access your registry

## Using Compose

It's highly recommended to use [Docker Compose](https://docs.docker.com/compose/) to facilitate managing your Registry configuration.

Here is a simple `docker-compose.yml` that sets up your registry exactly as above, with TLS enabled.

```
registry:
  restart: always
  image: registry:2
  ports:
    - 5000:5000
  environment:
    REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
    REGISTRY_HTTP_TLS_KEY: /certs/domain.key
    REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /var/lib/registry
  volumes:
    - /path/registry-data:/var/lib/registry
    - /path/certs:/certs
```

You can then start your registry with a simple

    $ docker-compose up -d

## Next

You are now ready to explore [the registry configuration](configuration.md).
diff --git a/vendor/github.com/docker/distribution/docs/glossary.md b/vendor/github.com/docker/distribution/docs/glossary.md
deleted file mode 100644
index fbe502cc..00000000
--- a/vendor/github.com/docker/distribution/docs/glossary.md
+++ /dev/null
@@ -1,70 +0,0 @@

# Glossary

This page contains definitions for distribution-related terms.
**Blob**

A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest"). Layers are a good example of "blobs".

**Image**

An image is a named set of immutable data from which a Docker container can be created. An image is represented by a json file called a manifest, and is conceptually a set of layers. Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port.

**Layer**

A layer is a tar archive bundling partial content from a filesystem. Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run.

**Manifest**

A manifest is the JSON representation of an image.

**Namespace**

A namespace is a collection of repositories with a common name prefix. The namespace with an empty prefix is considered the Global Namespace.

**Registry**

A registry is a service that lets you store and deliver images.

**Repository**

A repository is a set of data containing all versions of a given image.

**Scope**

A scope is the portion of a namespace onto which a given authorization token is granted.

**Tag**

A tag is conceptually a "version" of a named image. Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest".
diff --git a/vendor/github.com/docker/distribution/docs/help.md b/vendor/github.com/docker/distribution/docs/help.md
deleted file mode 100644
index 8deb6a14..00000000
--- a/vendor/github.com/docker/distribution/docs/help.md
+++ /dev/null
@@ -1,24 +0,0 @@

# Getting help

If you need help, or just want to chat, you can reach us:

- on irc: `#docker-distribution` on freenode
- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at <distribution@dockerproject.org>)

If you want to report a bug:

- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md)
- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues)

You can also find out more about the Docker project's [Getting Help resources](https://docs.docker.com/project/get-help).
diff --git a/vendor/github.com/docker/distribution/docs/images/notifications.gliffy b/vendor/github.com/docker/distribution/docs/images/notifications.gliffy
deleted file mode 100644
index 5ecf4c3a..00000000
--- a/vendor/github.com/docker/distribution/docs/images/notifications.gliffy
+++ /dev/null
@@ -1 +0,0 @@
-[Gliffy diagram source: a single line of JSON describing the notifications figure. Recoverable labels: a "Registry instance" box containing request, repository, and handler popovers feeding a Broadcaster with an attached Listener; per-endpoint pipelines of queue, retry, and http (Endpoint_1 ... Endpoint_N); and cloud shapes "Remote Endpoint_1" ... "Remote Endpoint_N" receiving the events.]

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/docs/images/notifications.png b/vendor/github.com/docker/distribution/docs/images/notifications.png deleted file mode 100644 index 09de8d2376d6f986374fceeb1e26389d3ab604df..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 37836 zcmeEuWmHvN6fJQn>F!3lk#1=P6p@k;xP%DO4boh?q$LFv6_D>&l_)yJI41Ny!V`a_St)_x#pbfggsSP!oEjw4*>xITSZy^83F?0DgpwMGCC^w z%gYy6gWx}iPS2DcB77g9+C)H*K~RyGeeRC9lYx<~HFDGGo46)xRj+0{oGm+?tLmE? 
diff --git a/vendor/github.com/docker/distribution/docs/images/notifications.svg b/vendor/github.com/docker/distribution/docs/images/notifications.svg
deleted file mode 100644
index 6c3d680b..00000000
--- a/vendor/github.com/docker/distribution/docs/images/notifications.svg
+++ /dev/null
@@ -1 +0,0 @@
-[single-line SVG elided; text labels: "Registry instance", "Broadcaster", "request", "repository", "handler", "Listener", "Endpoint_1" … "Endpoint_N" (each with "queue", "retry", "http"), "RemoteEndpoint_1" … "RemoteEndpoint_N"]
\ No newline at end of file
diff --git a/vendor/github.com/docker/distribution/docs/images/registry.gliffy b/vendor/github.com/docker/distribution/docs/images/registry.gliffy
deleted file mode 100644
index f4250410..00000000
--- a/vendor/github.com/docker/distribution/docs/images/registry.gliffy
+++ /dev/null
@@ -1 +0,0 @@
-[single-line Gliffy diagram JSON elided; text labels: "Registry", "Docker Registry Service API V2", "Authentication & Authorization", "Notifications", "Repositories" ("image_1" … "image_n", ". . ."), "Logging", "Reporting", "Storage", plus shape/line/text styles and Gliffy metadata]
\ No newline at end of file
diff --git a/vendor/github.com/docker/distribution/docs/images/registry.png b/vendor/github.com/docker/distribution/docs/images/registry.png
deleted file mode 100644
index e645df1e3a06645af3cf072844657e42609fa125..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded PNG payload elided: literal 0 (new), literal 24298 (deleted image)]
znSw2JyLXqsc9$}~PMEI8LTO3=)y-2+WYUs6iLG-|)LFlwd0X}MYei|@S`yJ#qR87M z2{6e5epl!B{zxteJI=!}qNLYTTt8z*e(Gcs`K+D&6uy@1_)b^u1-ql{bs!G?qYSN! z&HQvLuxJT_LlR>JZ>Jzp%V#t8rMRy8D$j{Kg27zerMD$O8b@a($##y`yW3-&Pnvk9 z8bep(kmoCreRlb&Y%y#a(G(92shs|TgFrMj@3j))0xsFtu-|N-i+r=3Ht? zp*jLihNN#CbcIsLrpOA|$$Qw1R;PT>r^19t=x7`po!{3L<#6IG{{n{wH?mSMj>oLI zQQ>|~US!nYrBH_6-Z<_EH@~gI{QG)@Z^)4t6#+tT$i~H$C)-1a1e-Llp)BuyI6S+| zu*5-p-GbMwr}^9$Ce9C`m-MC*RCPnb86CCo#u++z1lt_+M?ui(yuA-ees#?Hy_oP} zF1_zmZ;BNL6}jJ#{xEprzGdadOrj&1p>bc(<}-iYk7t=d$2-$w#)Ihd#!*@MEjQPf z5i?j}E&8RVtaGj3^BfnN^Jo)_&*oes#W)EWq~-!(S@j`uVQXm)3gg8pw`Vr}_RK~G zQsQ5quaIC==@q?*b6M?8d{SN}n;QK73swY{i;Ulb@7bRWlW~dhayVPEa;|FJ?@M2B zI@WwxE)WW{DO}IC%kDOw&e(GvK{pFtWPvDS)d2?bc}nkP;(C7~^W{$EkhcP}xg|8% zZ$bafLUWS@c&5B3JPu{Q{{>uy+hrZ03Qf`eMPML9hnG589?rQW(sk7V`M_{UP zdq~~DA!SG{f2R6haPn$gf+4lsr8@S2mTEircLwVX7wn z??1B>E6^kM!QKg?BJn|T2zehem)=gE@(>#Bx#Us5;8U(sR_c65;Y->^ne~;J`4na3 z2XwhpMCP%LG-q{=3u7;c{*>BLl>B$&@gI=Sw2z+3$$(BQ?CZlkAbm3aJcy;`Y~H8uGqs>q-@_WWBr9;+oiT9Rp8syUkgvrc zwwV)?V^C>hbMG&*k8d@ zP-%h9p+tsA_gcndu4aZi{+zb4@$dt2U8m^9L)2u;HEX2QMFG#tGB7K_?z4r@*_T{f z(Qv+v-AOOE?Cu45o=Gj9=PQUdR?m!14W`Se8~&axEL8b`1k*cL=bCF==Mu>M7aU>) zKpHWUr&G-TKesJLT!V<+yPocEioT{C=(AUs?@&1)@)Z*W@+CFARwLMp~M zF1={SW>nXB-&qPFSY;`@(VVc>@P@s5HSe>%w=*l6t8=bH!uQU>GduR)f)oM!-#lX! zG#3DBNQnUV6P5W$M)GO_q#*&@L0S~}jrD48+*%Ii z6iC%0PalSmkJ*FJwT%P`D1T&6sk zdW3ec-*RKhTff0nsr^W4$_u1+hZ(bax58--oocNc;m**18y}kUYrCXjP#MOvtRebG zh`n+YOX&Gs%$^MNz&&FYN^i6l@x5A0$qLAJrhI6x$Lw47f*yz}i#;TPVE^J|o2K>l zutcyBFJ`|$yPCFQSO4~EPvx<5<_ip1otoHASvw)s+17*_z5kVS<=wKv@dE93gd#gh zPvza0?nJ1?Kj6&1osrQgHL5B3mm~m&T*H0#ZB1f5 zPfxm28=eibvSvZppxAm!>~1CX>rW&gL^I*+{l@+r;TT?pyp63}emSv&KTB7a!MOrCXZR5pO(VAzD3&amhx9CD< zj1rNeT~cPBjd@|gy6z{?aCLI0pZDS8jAq$Mh~Wn_l*}mkC38sA2f1o zvTjx)87i`dB$lt36~&lUz5ZHdH`e9A1Tc4zn87n*yfmUvaSV|=x{t%y@c+^YHA|t<$^mKYlk?+hfu=vl>iHBgwxQ zl9G3OG>0z59=K{N#Lv(tqFN<9RS}h>f-MqBZhiV>GeY znk>p^VLxwV``$>c%6$9p)UMm>oMUUSKs6b|*kX-&M~T;hUMAJDEF9MlXIaN#eE1Ld z2x`@hpL`u=wdCrNL{A7bzBXSNP}AU*G>Hjn=!=HX;8F2XrroqURboPtK zFWS%W@q~#>CABwen|~fFmiA?hRP+YQfhUG3$fhM^d+{k)%eVPFC~@({fgNBaiM-LX zi%GY=emTd|($())ex4;d<{{kTlJV@nQ7GN-!o1tU4v``*J||w+mx^&5?$D$UHGaHZ z5ypPwrF8riHnttiRxB@e$NIma9L5ECw`0Fyo1@_~^YyvYkM%mM9&Kkj<(Z(fe}Fy4 zRbePJG+<1qZQt&bC=PBLfr<$iehwYOLvaU5$b!}0aKLTM;WYn(}jf$yVK44?Ny zjdQ{7%=-Z|0ClaD(y63vC`o*AX_E2CoD7n;iQBMo9m%A$~4{-R9^p_I{rdZS>6LcY!0W=v$N_Jg9sXjRau)isxi$O9RL2l9yJ(aQtBf7P;ULePP-nuh3zx#8Rx9H1x#q8ksLmq-n%Yu(m zA#L-tvo5?3p`6|$ij>rzXvq#0DmkoB8T$J5!BqRUznK+FxqHafJ}f=W%%Dc$?OvBu zyK<(bfKuEyD67!hV>GXcE<-P<(D)=80yCx)dNZyJIQ+VVK1>bC;D1$)hC&^!Cx`2| z()o+0$VlB>9R=Y)$F1XmOgM>;AA)gum=Xlr3*`~%MvM!iFnQs{wtDeSEnzKl7qt z9jqu_Qq1SQKpYrU?$?GM_NwWR^LNv_exCjqEt(Fv!9ZgWQH&`Ts%$CGX;wdKXkltq zF5^?qeiE#Fd=gR37+FQ!!=9|l)w6D|P@QV*)H?YDj`enPOIM{C!isVWPJ-_@v-1&W zfCc;R>3bggoq_lGJ4-sef4DJ>)+y1`+*JoK&G~*=GfGte5}QGGe{uWxv+o*;&rrJ+ zQ)GxH5+w&NH}F+K71?HpOj@5huMx;@)|P@dr?ZYUG~W7uMPi1|5S^dc;z%pr{+!O^ zQ0MTys3rC}w7>i%w&;_J={PN!9|Y@=O`FUD0Kb-B#kl%ST*f8+qgMeipyXv<+G+{^ z^(qXHkha^Msb)1e$bb&Z;!unGCm<@ObfZ0&V2yqC{S9Se`HT$CQA&TEf$V~RlLlQ& zd>ZXIS6dF%=p|@&Fx#6BODKX9WLPYRC8)!u4-UT?Zx9h6CQ9bJdni5V%(1^~M@2sF zVXj*lgoKl$JKj;o9G0nXS&cU$Tx?*W99Ey;mHo-vup_YYI{?Q>p|b645aii+b2PPV zl-}eBle*H?O2DgOPJ^9;@9&w>C~wEGYsJe3W2-DQ`+f*`kYMUv$YWT=G5)QvFoX!l zx>lgIH_u~thK=B!BzvTV*I`nG=Df8fezTB}Et%LN)64U%NLA$B1O)X4V5q+(1XR_Z z=WE6RL2wF6e~!&4!p!^8vGK6$nUmJP>XZReZ8%Wv3mHO75L*xT z1~3N#Ws<#%nXI2<+`P;dpr+0M7lk~4Gk8pyZzn!;SMv9c6#<8-_ zpJjb1N1Z|-F$GKf7FJ^Lts1d+MSW@)xT;=xcMfowfU}uNG(gaA5{L?cGRxMgk}UGJ zKkq#QbdRJ~%SnUxxSr=R7IgA!xGnVdIV^8+A~c{$K9u-r_uF9a?ED4)o4*5|=Bi!R 
z8gc=tV>qRzXls&Zb7OredQoR>Pm5W!K>i;}8R13Ovny7d@zlt2*Ra4Db|!t+F(EO8 zC8TD_arcLl*yr2`4drv^>X;#{NI|nFhD?MYY1s~*Oa(Uj%3fR@4XK4wJbW)ReRZ=O z&PQ>)pzSLB`R2Uobf&K$;okEUvqr8S+vef$-1kASHD+q@s8B^fMg_}tNS|9|?DS8c zx7=$+dp%QS$JUB>y<+0qoxhbp(8NnHXuVuFyd&__Py|L?+b?D|zOKq1DXZAzeS$f; zPRgzs`^RH;029|mgV(;8aY8u$weblM(<0ZjaxBc~>3uQoytz*=uaP7KcmMG`-t*te zsY_5Mxwa`33ps-0=i2D1&N6h7J18@0`-Y~?&UQL+XVd5b)D7lQdb6KY-B{{v?PKpVu2E%Hr1d}b5><}WgA7jYrui&w0Nq{7s`P5tKp<0J1Ee8_1 zkr_&c5KMh_?)OVny!nmDAGa~}@ce;&quvf)`lRwzEp-6XzmgI68AtzH48?kAL>IUw^zC&MeS23cY&4}1GsA+5Qo@nubouDLRNV&zm*Fcv|ms4 z6sbDfp40-@CA4%kEio3Ol!}i%!IJr=;de5jj|BQs58$D1c+H?aigNYKUP@-s?dUc$ zc>hjVUx#a^80mH6=>-{dYjbB`Zfmcx(rw2pXdG2Kl&L;5s6W5od}-RH<)uTxYZ&Vu zvpYloP>FtoZ5x?L=9cK1SoQqHym5d;>7Mq$6=0c9MI(9BJo#Vge!wSBQT8;j$HGJ2 ziIAc>KvV?lmnfU=q~)jk)eFk|qTLbqE;h_nQ!a4v!ZIq-|lq>BEa!vH&6~eLp zZJGVFuiv0D$c9gtfv;dlB0F5j`S{(Iy7DJ{sq+JQC}KpLwJlhUM&theL)%nnh^Tp? zkfHHfirUS+zBP12p+6Ahx^%s>BSZuS4k@r5RHZs?XGjvNUz9OA;)RBN<3jtkq1S%8 z_3cyU)ZS2v*w|d{YzNzFeZy?MF=Pw`uiGOK%oFvMM2f|AeSjod*eOj=Q(EKn?TnDq zwXrSi<}V2%@Gf(cNM$#DgW8bmA6!a9`;4qQk}J|0X2g z=;2Bl6`y01JoyUsOW(b)_N4$k+uEgX*nCZ4TO8zpW|s@Q8&YN1oqBHYV##p!9e2WwSxd?UL)EfT=pQyvZ-Nf* zP_K*Et%SK8{2CDCrOXNFOCiwlkCJGXolkl;@4a0-57S{iZMQBaXX#nV_&mEcY3BdO zAU*XHS{-^u$InOk5q&UPfc0 z71xWX8i~a;OhHxsqB@1NdR$$QgmQKfBbH-!Y`i}UMVy7U7FzrfGE#Ax^uAXIOYLf= z9X1up(MRe~550P#h^JPBh(U$Z5MQC3B`pz(8`mA>?|6ag@JIDm^i*m6QIMNNMv_$yxH|9u-QwahGp#Lda&O>VC!(i zPEf?rFKT?wh9*(uFP*{VkaPvOFqN$N;2doZB@nXh4NXV>OmE7P0o{yYO5(bq#Zq#8u!|KW8 zg0k8?Ubp8XK7Zdl3oH~6_Kl__hQ}37tp1-D0B%Vs4TMzYj-_vp#OU4gu0GTCR9c$4 z4hXyKR!`nHUE*HO3g;EzQ_iNk=5v;r{+tFwsvI$WdCyV@aGuSRwe3~U=~!cmlz;J= zyfQ8A__Mm-?nxDF=l2N@)N)I=un}r4*07z(ilTgI%Huij9!cvz=ZxGvTj)LB+%#b{ z_R;Xq=IYm%qnoZ-xXFD>oSDC;Dc<`&OiLwl?@x|-aFc3p*g#KvrTcbMv^N&^T z!r~h;VkfaT=Vr(_5K8&(1J3d4-XP9M3#oWp9z2Gf#$RMHTFu44KDV02&OL(Y^MEFt z8DkDhx775{BT)xBPdma?NuQsIkg3y{169wf!|px+zV`)A^L`M#0w63QY+cTkE97v? z^|WC>9^OaybHUWGMyh|KDsPNZmyf} zt^CbEK)@rX;S;;cbg!$!iq~+9Lf!JLl=;;vBBYtuA*+kwx~72ZZ-*nf${$sLt@J?G zkCkU_*r<)yjM;W$<3uSLBl#Zknv%AA$5V9jj``L8s1Cw}UbP<7ErrnJ094CHbqeYd z?7Ooyq9szO7BwW6jB#y6I1q&UB;47CAFufqyf!v#Wj!7GV48rnE^ui4T2uIEyOI>g z(Rz#7TG?cYiIE|l_s8;VEiB|?etCW0R!})vvFyrAm7_eZZ}oER*EjsRV_$c;_P{P# zd^P7wh0`HZp>|UU?O5|4Y?K&LidWm=)0!45_^80PeCoty(9zh>L>24=u>=)y6ee`0 ztr*_}nf)|EU@t_QmBDk)`Fjc6X7~q7Bzv!v?*&yU zD0xD0!5dsN+{H^(-hj=D#VkyuzUiNZmyxk?2h8L36PZC;$*x12ygsa0It)_FvX*ay zBLuh7-3n)?ZQ=6~_jCK!cF#ls=szT7%PTJRZTMN?eNu{?s3;YMyylI&kvw<;_Dzh{ z+7HiFpc!yhHk)is#1Z(Mss3;F5i|&AqX-uN&D?}|p})8Gg#=!clzn`Y--dRuk5%<- z2JDXFOYQV=^~N%SUo#70o_OrMrnpCvUMt!fNd)S`Qd(_lDES#RU&GzzeiD@>jndgY92dUE^XP+3w=KAUj-yCJEEKOF)f#Y z7M#4?qIZAlD{lNW20~hMH_N-LM6Ds`mkd~+J{)}Mx?J_Qn!FNzW{w+TT;CQC9V0z`9&mmN$iBZ zpPCv2yz3n}oPQE5xc8z?*O0@BYyMYrg>ClHY-NvNzbk1)~S5V_WUj2Vo?1 zPsSzU)if!S_Iq)%%KwZg-Ii5Z7JP4-pegN^jb?rZtos9rmh-rnDxq{UI$!pOA1MEB zg@R+g_;l3j7z_DL?g+ibd8MlhgO~3sDVC_8lEwyCKs`9_=+GYo#1aQGmZU>P`fPm< zLTQ<7Q{CgsHCFGom6^8_o;R>8Kw>{pTpWO+9ksjWH$FE&MEg391}vBD=AFAukZSiY zPdmz|-cFiW#e5=61poQZy-XHt*R2J3b6*bJjL6~v_Wk}>oh;HGbs(ZVnatsBYRY_O_QLi=88;#+gM*`yU1yenk18b(zUZ;l?O@zG0NA^&J#? 
zR^Hd8O95qflTdl{R^N#0@%J6ox0Zhl#n!%^j;^(*ocxPd6xV70MD+PYM9hRtN0Wjqm;}9!_ z$ohE@m4sJW=HK#edchHA+@{U}k15JTYa$jUakj@?Y;)9Q?XFjAqeHB9ki7q+I0>EH z=PEK0hSfh^&Fy+Clxqkmc%$%EMq5Ho=Dl=2&}ls}xG&J~;}Gx9bLj~TdfUqZ0f73cU0f(0h;8ns;co-ObJ9Vbt>v^=8bljwrO zb4ym&*|N;83)YI-$Cu9hBND@2oOfU3J<`z5~kTsC~U zELEWf;<#q7wIQtOs^E3}%|X-E2`E`2-XChZ=W_1|tdCC69eXZBO9UAM3=FQb*re0TcTc{l%RF76(y6UIN}; z#v$Xn)!&)OnOII8$NW@r?X5CezAL^XY9K#DOOCx8^K>p z6ovmVN^q7g(ktdVS{sUyH~iUOi0O#un&?XHSrUuhEK9GqQ<$IjX4!xdZYdgBO~geD zbCA5)$o{v8qxZk;SaPsyV)5({AjH0BW@k%Dj2|MMx5f*f*4yepE+oA1#xj%s?Wbn1 zly}N?IkZo9M2;5i_PZihFB#(V<9497f!^4gsSxIyt+DQgVy`Flf~dcf6Gf{tff%cQ z&!nTCfNJ#%BS*TPH3o42?@bx~uTY^WV@(Zi8&1Y-12(CX)`Ql6UhBv%FlwF5QKtcO zp@8R}v88qJPxGa=4{PGpF=xUqXdJr(6%AQ6@aGjEWD$x)3B@mGj{&-4Df4`G5q(q#j!G;7@8;~c zkt6)868+|mvj8~;03dbEd@Wa_=fuAzB^$i6O6_>w^8#JVp}Q$3<$nZqr|r=X$2^Vn z2V=JC2@8HV?FykxOE<62{%osNVvs}!6F+%08tqem?aC?cdqJ2eVp;F}1!rTn3OH)u zvN-fCm>AAYgz8F4b}bG4bq=%Yya@zR>m_%>nt+DcwTtf!)!dZyId${nOJJEps=-aFk)W~{&oBYR%VgJJ{LK<&+)umrUtmOA0b^jy(yp~1Q z?dyQfA1O9DMwvBu6gxi=1qRjWcMUg9BQg(th9u7(OJ4v+fJKD7Go+>;a9uTaO}X~@ zW$EK>c9vHng~O1Z|3T$J8}({+x5tY>nK)?^?QxyN*jwz*11=m^t7Sj0`9|pd72&bK zcD6X?yTpAEFJ9hqoj`c)bIFy@g8i#X7VqQLXE=l)|FmV!9$SGTp;Az$0NSwP0wak@ zZA6!Dl9DzKty4fFnBp}>zwch?iT^~09G0Oz`QvFqvTKK}$LDQ7KA9LZkYnH$^SQziNl|1HUTb4l;PIaY;>`LRfb*Yarhqp@&_7nw(nLC zR?l<35<-X%PvSxDM4hW`q5hC0aT!QZ%2C}bG{ogz+FgPc3!J17{85oTssoxxsGbXx zS1|W*!++Cj$|kh3N>V*X@3V9F{U7L1J3%*Crauzz>i}&cu z@K1EgTa=nMeO~#YeyRKimn#HE--{Ch;ZYr~kP1#`+Mzh333Z5I zqSQamCXfm8@hOck?rArK_^zjyxz~1aO>fi#`TlKlQ>qm*d;;#YcuV(APY7KyIxh=E z90J{$Dqq`0&-i4(-E#Oc0_P2|^(7^mU3Ik!(^tmEL5u<-vA!5`CQnBEjEv1qFYxoC z8z@f~XM4Sr19@3ix8z>^cJ?P5{s=HESuIyDOJU^u9!Ew*jWOr>INbkCUT-eLnJiUd zS0)>^h$pL7OVZbcitkGazuzM$##q&C zMHtgx#*g94Xh=G|c6bUr$&GbBS!NK?SA#dx{g3N11RgKFC+*)1jv?SH-W0XRe5J%_ zVcDL-mn=0Zg-*WRr}`_!tD;Vqhb?No<0GZ9h`6T6;yv~!Yjk+(I#F6RE;?6VbZvt} zrqa#1!ecGPb>h@|6?zFriBGR`v5TGVH8g4x+;&J zDz`wywrDl|3e1%q$X;3;8f9&@e<^xMX{CumhiWnyPdPK8AwWIcF_GUgBysYX8ZQ+! 
zv?gzA!7+uoku_uP+J?adQ~8_}Vpp!WZ#Al6W*t`T-&DWq3JUs%V@)9N@m%}RK7C9C|0N`<*Gv8iqQj{ zO7--MXf_m9BPJLUAg5Gln`b;@WGfAlgwSMM9LHg zj`7GsM(jI{^MgWFCj5aCu0baQx!e-65ifvPa8D&|K`$Ci@;yH#;o#!{p>~BOv{<6o zgs)uQO9Ov!>R*NPc*G`pRkn>6An5(hiQzAEs;&N-VDE6ix8zDqoHjDPT8%()G*F{( z_V4zpF>gp^=>z21i3&*Z?{Siui0H5%VYVt+Te>50u>&N~G4mlJXvPW{7Y-Z}Hh9Yd z%;}6n*8C+B^j?c-q#U6{L}r?`6;JgeQ$_3p#X=B+tK7bs>4~Rz-AS%G$nHo=_-EtG zV=1IB`LY#G$;_v9Iafx%NdvVs0rBf(O1y9a5(|Zy)Nv(RMYT_$@V7-AO{SHGMoN)f zt-Hw5_QA;DH;wGniX2~SsMz;EJZR8sV)Rq66^cS++cdFJx?A^=)3_D~`_XE={9N>{ z^o)oHbfY}?I4F~how6=oVblV~95J8xzL_SjTk|}dB4hrCv@uId;<-h;JNU=w1@O2v=7lg5;Zy+;h!gzLCS7?RNsN1+~Zl4$B&`nD3 zs-p^13yggiNb+pm@py8hwpgLJkbwL{(OcJ_Znipu_vu-ls$wjnx#$WWr>fcSy}zfI zXntD$O(w0pB3j~17QXNCXiPD8XG^OOuzQhoCORQZPI^uPU&eb>_lRuVHZ0n|RKkC`3iBf_1bY<%K4YEz#?J&JYVGmhCW7~km2-|+_(lVU7=yaq&FWtF>xQj9Yr4TDJpIq zJuoZ}l%QdQRZCTtd06-|zj}oqzL?{I+_ZT+x=)+Q|G*N-pIFZP^yZsa|3F#qNyQ~L zBMUdW=z&~<4+h1963Ste%kjYRGFP2r^QH#Y>BbH}`HE%J4mnd#yLdo2s{MljaQBR= z8sL(CHgL(3@IGWrsNg-~Pp&B7tK(4YRbZ4G(0j3*7VfO2shpXD9`uqDE4Ok=hW=z4 z@N2bQsqX@hdRhNNhF|B=lpTW59tT8c3&-4@ef6%!B-(NKJ%xn?8;Ov|(UfCDp7Ah& z2E|Uf@)BB-#sT$_)R+7M=VE#wDkWDVeky>c$?a{l=sB&l%<&i&G^i z%clk!@}>eMNC=0WkG8&=khMJ6XslJB5pj4~aLXLoD7zrcl*P)Y&2G&oh`PhbZui#i zTQgAJPjG7NrKIU$|3Yt1d%|-j+vwNH1rL3~=fUWh*sDFh)za(j6uWs3iIqes;)$u; zw6PL1Az<8_;o_2t*}Os2%icOall=UttQ5z(5Cbj8i>P&zAfo*+a>}*Qm*VC@YB?MU zWHeEHLc=^l-h!#`zl|@&D9K7aAM6zOFAtLN4EnZ~rMiYr6M^xoQJcY*X0~mR?6zLZ z^gY@+v(6kBUzV4YZy`T2!*>)`w=TmUBoQ#U{RReS54l;nzT;j|zBueg#nt|G!+ zn``hgus+{gc+Daeup%mT`YTiGnw+_(ona5BJgr@p9&fQbfx=!+gF`SycB8JZVzyJ{ zy!mpE;FwHOZ>$+o5C!f)eb2NHDTU^bVj(yUvx(20SPAcz-QZc1cq!w|fR!Og54-nk zPHCY$&!X{s($9G7;ftv5x0sa0KQMBLyjN9sSrc{uRnOEd6{OLAYADOqI?vaE? zr9e-5$2?g29HJc|svhl8L>ks{hZ>G~$7no_@tq8xHH<}=M_A}hNVkcS*VHema3c|6D6zxU`5{}6Lj5ZeRIC8>TMz-pTr2X~ zhi+j74hvTo-`DrZTQRA=eTKgpvPs@~BgQ49gxA=Yh1L3@G{C1K=Zh-VS<3;{!62F2 z*dgSZjr@|?hVmR0hg!137R2w4`)zXJqNW$g`?S}^EY?bYwC5H%G8!SfBSUZYi0+Bo zqf!d~OqQj{_xW4360M~~qrM`)XbSp7A$Qw|N<$uhUB;8-RdJ5~;PaB<0io|$^1HU& zOG)x642giIxT~zEuJuwM4#i=ZQz#a6aUHRYPb?;eDM7}g;h%6bY}!@khV4j`&NC{) zgIOZ&2_*d3E@HaH*-0|)EGutmNrg}M?1jyz>gPbjW4>hIH#T?3-eLTt5Xn9LZ$$`i zx)I$r^p#veL^7UH1@hJ>?V?T72RwSzmrNAN7acgdkQhBd%rL^$@HDwX9EhTenr=E| zCxCsC9=j}|OZ<15FH1MxEE|jWhDL4$H=BwSxmhbay|smi%3~cL)Rw@F1v}ix2JMb= z+hZIHQETK1dkDg1WQj(oE=u-8M=DywlWwCF>bu-Wp!oCYbFe^y%C`~+J3bq7#7(zY zjs-@wyFVP^1*X1?uZ!AE0e#tX2p)$4!XD7-cRlcdvLC~D#RPd1&GW>Vv5=kT; zVbBr?$9YHxTSaQ#+;%3cC6=77Oe4Fy3J2C6n0*)d#oP$H7Y-5TKWRvEVgE@g9;HMQ zl5BeXN|}U~H0(`B$EWXKXh>p>8!hGp!9oE^Ctm^ve*KRU6e?6d_L7L z$;9r$*0dO&QCN9^#5)2fy$IE4e%7h*eKPS||7wBiRd~2L3)Hw7c?XSiWTKvt67qhU zJx}kkHnUedXqPnammm$6v=Zlck#+o8h569dj_s4SL4up?Hg^up8FI$>%8Zav%ZkHT zjPxSX2iIvcoUfT<^ZK@3PIIro#zbdFMr!2!=tUHcP}E*n2nKLZEo6kHUV$EscV8A1 zO4}MML3iU?`3qewdfqDn9SYNz&m}~xzYKH|pE_fnRkSu!$)u^hSf00NOXXUMO#jw~ zo59QJ!ASBLLWoI14FtFHP}^>VNHRfsfkBbeVeeaUuKP5Jp}%l8>Y_RuM_EzxvBt0A zIXwBiY>wr@O+l9-m2F9(HupmrV^Df8Oc$O&&D`Q%Xn~XSw9F}*FAuk;pWx*rf=1|J z59#zg8@>G9Ihr$q=_K!&df*z-O+7T$K$-S`K=p= zHZ^KlI~cf;Gwl96avE*T!hSNHSog6rZ9y28;^=MSwXP#VJ~bOEtX)CGR}@)jnnw{_ z{kgC9Y59*^LwDL@$II^rqbH#0^kH2s=A+r4Z)v6v{4vQE(XoA;??u*OlhE(53cV~3 zXv7M(DqFuqP*!TA6YG-9k(x6lUkOwXYdpyAuuZS}*XzTdsD6w#0$4GrOpR8$lnc-a zs1{kCJh?VEj^-OinvA z>#Op{+wdd~L#%qIKe zif6(C8;c_&a3kG-z{8VR`R&;#@hd%^k`UKw#=h^AdSO@=*VSn1u(4=+&*tXMiHqK~ ze-7(;QwIaew$lf(`?ej;GV@Oz*<11Wmha^%w|?9YbIE1gr;Pq_>>d5bR6 z6Ea{azi=qY9@J6h<;OWO)ApZJn?4l=n4fAYrB@olMT9FOOchU!aqn)_EfKVEy=zYz znQ6=sEu`&<|F2ce0Y_-e1(@wi6Y0b~gbrezB(K1cJgzzKTmFWqH_+k#8RbdaZ>$9$jKv-LB9$l8u1jb++-~jQ zvo9iZQnZQ0i@u=SO3N8wIun-@Lwh$&yRrfD;go#5!siuRz&@G|`ZSgMZvF4UjVJ7p!F(x)vm2_)U686Gw|yE! 
zrkd|$j)`ZItXP!jfA@yUs#e7z$X2@*KahDzIC5*y=n%iZZ7cm43_G7C-h@Et6K~%O zP_q8PQKk6~M&x=@xA|nGIHsIpJ*L)cx- z2W4%@(K2KUj-F5*9IZO>tWkk}5^13jR$P;iLcx6JW2~wqGu5Y`* zK{K;Lpi~4)dhtb98YUT{v*Z0*@aAI%2F_7N#|dFjuP;&m_DJ^DP`Nrfp5B?NpTEXXX>|*0r~^Y@S9hrKjJ!}53s=cZhd#M z^LAAg+&lQl-0JU)d|g81*IS+T9bg4x(X%|ANC~tCRsR9j4(?ajv+*X690Rj!lN!? z1k~6Kyx<`D0BW1!9D+v{%%nMmNqLoFyF_s09wPEEHD_xY7d4BSO(6y^ni7+gl?}v638H{{EBz zu5z7b7_@G$ep~(XB&gEFdS&+4)Uz3d_0m`}zx>K~@~O02@2-{hZ;3`^QKl$Ck$7Cl_o~ab*)ERiB3;oD@CDfzXiM&0qGICcq!MvKsM;B(ZR?# z+@Jy9igRFfZcynkFbOvQ1w1BZOs#XkXBOnMYbAcBu4J$a5LNk90Bo6MsUnV%Rzi8H z;snl~E_90gz3Y@m%EjNp=T5{vD;Cy3#@jN^L$C1gN#mT1<28$NM3ywFU`&+bvV@O8 zqDPOWc|CFef(&sZi2UkdAGOiYrK!tO4f;oVZ~dZxp(l)85#cAv(xAXZMWn)j-$0bZ z1vF5t?(*yMQm8$nS9qBI-QGo%>HjW706~n{S~A~yHU4$=0(W%Hh(XiIZ=_Fu^)z^UI*8*YM7n%cTG9E<_!3W9V(F%*%e zG|53gdanXX5v3fYSB;=F1(9N?Qlu9F5fqi8D2RxmNmZI4N{}W30fBfB?%ba9jr-^O z^WAap9sY2PFp|CZT6?WI=ey?nWE(OV_)2reGfp^XSO*(_Z&*g=?(fGu*Ou}Xe3Gf) z{p7usJSV50P!wJ3l8gV&)Ym(mKA>>R8kb`Mqy4==xL)B1X*Q|(zejs@VGCT?T_D>x zOd8E-&$mRP6IGq=9JG=T@j#~<7^%}c{3+w|_<@Z0if%uO=i`LvD7Ui2scX*Ap(r0TXO^Yx zUqo>0r7!6lQa}!u6?YqH2~}nLGF~7a%S{e7OErzWw|b%q z$`X-m*9GksG(6@qoVI8R93jl+CaQXvFe^<2rB>NK(gY8vOz859`c>-dQU$m2whh;9 z%XFm6-7uGN^k=2MQlo_m9!-wxisNN*JTcyOseEyzA2DMG_O^vGY;AzOpGOTvYBMN+ zMk={WQx(A#iJ?m-xhJwDRk&q$3CCKwKAXl2O0)e{pb>49*BCg%xCQ-tZ0+vL9A7)?Y|E+0zOhe>hr@&$Ad_}1Oa z)^M&n1qE5vawdEYT(m2;>Qd+S)hzcs2|{6_D;Mu1oGx^`g+M=i_RF15yT9`3+4Tig(s#huQ1*%I;6uWf zL$sz<-Jv^&^Y(xMY%`g#-)Q#Mk!m8~XBehmwcl)G*_uxJ3)nNVRDVn>-xDg=kpvoL zJMjIta2+e*k-27g?}R1MG?poAS3vcXg1yUm_9QJ$7jc~20$R|*AD80)tWa|^uB5G` z{@tKoNsO96MVrkn1`-;D!v8w88-M-XjC|$O2X$ke5h;mD+Cep7G0!o+Z5UK^MF?X_ ztLxL`sKaBtmq4*2I-@atC|n8EYUZj}NyxE+lU|ymB?Cc}h-(NnClgIB|1K-4&Zhf} z)-P*(J!cRlQ|5oqq29+~&{s6*au-h5fGp9(FL(aBoDNoKiTz@VCC3HS&9g-Jynrh$ z3VI+wc9;DG6GRzb4Hlc9q^o@Vlr{ayL(=wtDvFu7QH>U!<63=~zk@EtCc$-XRx_@x z?X774!kh!n$gIz3m^Nqs%b+T)j%03n2H7n6S=6u2qbM?8bxrrLlR zgTPl_#`X_x48e(*2WldU>uT*nffiABLAS<>zc0BN=AHHF*CrFbX=unQ)c&D+7d~#aWifR65-yaHVC&pZ>@+tdPYzcISG~oUj8nXH3 z!_T;=CQLBpa1{Q>JsufBtncje8fs?GPjh{>qLl3mxBqbKo^QBmXj1Msf(kQgg@xBc z)_g`s>`SIZFghHoWP*6^=oHWJ5c6czwo&Ww=kQp|gSaaRrr`B+McOnDNApCIY|BMQ z3Lco`DKRG;IxVPoDGl67*%I-hSxl5THg91+$FS8m5?#97WRV67 zPzsFLc-FA3nDyINZpE7q@vSfiGqLVz>~qC`i#s48C~Hbfea(9Equ0KVXxm_853KL9 zx<#VRiQKTh#&4c#wB9mu`M(W?2H1-A(Kw5ni=s>n*`r3)ZSv&;wk_3PBQ@rKJS}hF zGrCsX2LO-@!Z-JJY76sn#l@eNw$m>kB=)IvtnlZccw3<7N92%0P;xRQ*{A~jly9IM zBcv{8mr@_T)*xj~kTSPVWwkU^EYTV}YHS}e;52sbv6jLGYCDeZ&*Ba-@udj>EB zOcdn}%`trw6y^geZop76+B6HM!TZShKLZ?FSb=vaT(91s04i?o`Is#u%3^9)OdMDj+xETBVKS3EyYHe!C|-d z&nUVJMd10=XIkT(M6VkCsG6Z`c2-JC5(w5(l(Bc{x}&J+3~42ww3Qrt&m~FErm(&u ztl-^vaEne!`FC+2JU($|)#ilw3ukG`@E1M4#?H}U*FtkXeQ3RVyRtg2{29*4Qda!W zukm?7OM6R|y0z1*SS7VXR~Ik ziLCErtcUl`S{K~a&^Tu1N+3a!D|^UVV1$^8J4g;}J#Fccxn_QtkWRKv>75vpF3;v5 z2P)Y!P+k%4r-b=AVmi64j76*hV1!; zj^SmE`JQyfk+2_@;}Rw;TkGtCX{Rcy&4Z22?|Vi`@9nwAN1_9I#bBV_Vd^@*Q(V`q zL%G->Ftf35u1Dy|lZ!LzSeA0pnv^he14^V@vzrA=TLfqn$UiyTxv@7}2rCbP8ogX} z3Kc*@q5m6dbgt0z&@^ZRzoAB3)!|D7rr**8K#ihrR68Wdz%>!5(e(43r?YZX|3HnH zIG;h12F}6XP^0Ca!+CH2A1>B=fCQM2Cs?=lP)vEw!6^=7RHK1QiSk3B2U5Wy^Ehsoj!DdMT zr%YE{9Q#ZQnzg%u<)~RE--=|%Ask~G%*ZOj5!`y)@I*io@KN7j01=fcfmXHRKEN^f zLzf5%FxY~{uTB$hFQ4=5MJ9r}mGr{k?>`zTuYlr9LaI2BE;$nB0j9V~V z#9`oI0rl|Q2`Y4hcK`wh2j}4_#Yra&+LQQ?fLC4Wi=*c-8^Wks(@-%J<(n!T1b6fd zl;;_cb?PIuJ3D~TKx6e;3aI5BP}YgUBc+QZDHrKoshZe_FcYx$1NhEJQI>)Y+sAH+ z1&ccfi>OPc`FM>!07ycqfb-G(d)X&hnYPz_!X70V4uF<323Fu9y4Ns4_N z>fML}T)+-g0`eEynM8!pD`5yAQDc)IN4g{9@2LJE$csN}syWYKf*!lXiJqbiRTe8`)9)D3}Q zj7E4~dORJWa0>suVLBnT2Jk>1wxWR3s1w3*QDR0t59TaiOvm%sjh@K&_iei1Y_-&> z`C9wrHuTxi8y~eCxofqs23Mqp1>$3t9za6AC3Y5P2-! 
[GIT binary patch data for a deleted image file omitted]
diff --git a/vendor/github.com/docker/distribution/docs/images/registry.svg b/vendor/github.com/docker/distribution/docs/images/registry.svg deleted file mode 100644 index 723855a2..00000000 --- a/vendor/github.com/docker/distribution/docs/images/registry.svg +++ /dev/null @@ -1 +0,0 @@ -Storageimage_1image_n. . .RepositoriesRegistryReportingLoggingDocker Registry Service API V2Authentication & AuthorizationNotifications \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/docs/index.md b/vendor/github.com/docker/distribution/docs/index.md deleted file mode 100644 index 06a47842..00000000 --- a/vendor/github.com/docker/distribution/docs/index.md +++ /dev/null @@ -1,63 +0,0 @@ - - - -# Docker Registry - -## What it is - -The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. -The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License).
- -## Why use it - -You should use the Registry if you want to: - - * tightly control where your images are being stored - * fully own your image distribution pipeline - * integrate image storage and distribution into your in-house development workflow - -## Alternatives - -Users looking for a zero-maintenance, ready-to-go solution are encouraged to head over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more). - -Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/). - -## Requirements - -The Registry is compatible with Docker engine **version 1.6.0 or higher**. -If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry). - -## TL;DR - -``` -# Start your registry -docker run -d -p 5000:5000 registry:2 - -# Pull (or build) some image from the hub -docker pull ubuntu - -# Tag the image so that it points to your registry -docker tag ubuntu localhost:5000/myfirstimage - -# Push it -docker push localhost:5000/myfirstimage - -# Pull it back -docker pull localhost:5000/myfirstimage -``` - -## Where to go next - -Simple as that? Yes. Continue on to read the [overview of the registry](introduction.md). - - - - \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/docs/introduction.md b/vendor/github.com/docker/distribution/docs/introduction.md deleted file mode 100644 index cd0a0a28..00000000 --- a/vendor/github.com/docker/distribution/docs/introduction.md +++ /dev/null @@ -1,59 +0,0 @@ - - -# Understanding the Registry - -A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. For example, the image `distribution/registry`, with tags `2.0` and `latest`. - -Users interact with a registry by using docker push and pull commands. For example, `docker pull myregistry.com/stevvooe/batman:voice`. - -Storage itself is delegated to drivers. The default storage driver is the local POSIX filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure and Ceph are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md). - -Since securing access to your hosted images is paramount, the Registry natively supports TLS. You can also enforce basic authentication through a proxy like Nginx. - -The Registry GitHub repository includes reference implementations for additional authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way. - -Finally, the Registry includes a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting. Reporting is mostly useful for large installations that want to collect metrics. Currently, New Relic and Bugsnag are supported. - -## Understanding image naming - -Image names as used in typical docker commands reflect their origin: - - * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub.
This is simply a shortcut for the longer `docker pull registry-1.docker.io/library/ubuntu` command - * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find that image - -You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/). - -## Use cases - -Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available. - -It's also an essential component if you want to quickly deploy a new image over a large cluster of machines. - -Finally, it's the best way to distribute images inside an air-gapped environment. - - -## Requirements - -You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the CLI, and at least grasp basic concepts about networking. - -Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. A strong understanding of HTTP and overall network communications, plus familiarity with golang, is certainly useful as well. - -## Related information - - - [Deploy a registry](deploying.md) - - [Configure a registry](configuration.md) - - [Authentication](authentication.md) - - [Working with notifications](notifications.md) - - [Registry API](spec/api.md) - - [Storage driver model](storagedrivers.md) - - diff --git a/vendor/github.com/docker/distribution/docs/migration.md b/vendor/github.com/docker/distribution/docs/migration.md deleted file mode 100644 index 5dbd766f..00000000 --- a/vendor/github.com/docker/distribution/docs/migration.md +++ /dev/null @@ -1,30 +0,0 @@ - - -# Migrating a 1.0 registry to 2.0 - -TODO: This needs to be revised in light of Olivier's work - -A few thoughts here: - -There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released. -The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool. -One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry. - ------ - -The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process: - -1. Configure and test a 2.0 registry image in a sandbox environment. - -2. Back up your production image storage. - - Your production image storage should reside on a volume or storage backend. - Make sure you have a backup of its contents. - -3. Stop your existing registry service. - -4. Restart your registry with your tested 2.0 image.
\ No newline at end of file diff --git a/vendor/github.com/docker/distribution/docs/mirror.md b/vendor/github.com/docker/distribution/docs/mirror.md deleted file mode 100644 index 78928401..00000000 --- a/vendor/github.com/docker/distribution/docs/mirror.md +++ /dev/null @@ -1,62 +0,0 @@ -# Registry as a pull through cache - -A v2 Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. - -## Why? - -If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn’t have, it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the image fetch traffic on your local network. - -## How does it work? - -The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage. - -## What if the content changes on the Hub? - -When a pull is attempted with a tag, the Registry will check the remote to see whether it has the latest version of the requested content. If it doesn't, it will fetch the latest content and cache it. - -## What about my disk? - -In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching. - -To ensure best performance and guarantee correctness the Registry cache should be configured to use the `filesystem` driver for storage. - -## Running a Registry as a pull through cache - -The easiest way to run a registry as a pull through cache is to run the official Registry image in that mode. - -Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster. - -### Configuring the cache - -To configure a Registry to run as a pull through cache, a `proxy` section must be added to the config file. - -In order to access private images on the Docker Hub, a username and password can be supplied. - -``` -proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] -``` - - - -## Configuring the Docker daemon - -You will need to pass the `--registry-mirror` option to your Docker daemon on startup: - -``` -docker --registry-mirror=https:// -d -``` - -For example, if your mirror is serving on http://10.0.0.2:5000, you would run: - -``` -docker --registry-mirror=https://10.0.0.2:5000 -d -``` - -NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`.
- - - - diff --git a/vendor/github.com/docker/distribution/docs/mkdocs.yml b/vendor/github.com/docker/distribution/docs/mkdocs.yml deleted file mode 100644 index 07bab4ec..00000000 --- a/vendor/github.com/docker/distribution/docs/mkdocs.yml +++ /dev/null @@ -1,18 +0,0 @@ -- ['registry/index.md', 'Reference', 'Docker Registry 2.0'] -- ['registry/introduction.md', 'Reference', '    ▪  Introduction' ] -- ['registry/deploying.md', 'Reference', '    ▪  Deploy a registry' ] -- ['registry/configuration.md', 'Reference', '    ▪  Configure a registry' ] -- ['registry/authentication.md', 'Reference', '    ▪  Authentication' ] -- ['registry/glossary.md', 'Reference', '    ▪  Glossary' ] -- ['registry/help.md', 'Reference', '    ▪  Getting help' ] -- ['registry/storagedrivers.md', 'Reference', '    ▪  Storage driver model' ] -- ['registry/notifications.md', 'Reference', '    ▪  Work with notifications' ] -- ['registry/spec/api.md', 'Reference', '    ▪  Registry Service API v2' ] - -- ['registry/spec/json.md', '**HIDDEN**'] -- ['registry/spec/auth/token.md', '**HIDDEN**'] -- ['registry/storage-drivers/azure.md', '**HIDDEN**' ] -- ['registry/storage-drivers/filesystem.md', '**HIDDEN**' ] -- ['registry/storage-drivers/inmemory.md', '**HIDDEN**' ] -- ['registry/storage-drivers/rados.md', '**HIDDEN**' ] -- ['registry/storage-drivers/s3.md','**HIDDEN**' ] diff --git a/vendor/github.com/docker/distribution/docs/notifications.md b/vendor/github.com/docker/distribution/docs/notifications.md deleted file mode 100644 index 0552f85c..00000000 --- a/vendor/github.com/docker/distribution/docs/notifications.md +++ /dev/null @@ -1,323 +0,0 @@ - - - - -# Notifications - -The Registry supports sending webhook notifications in response to events -happening within the registry. Notifications are sent in response to manifest -pushes and pulls and layer pushes and pulls. These actions are serialized into -events. The events are queued into a registry-internal broadcast system which -queues and dispatches events to [_Endpoints_](#endpoints). - -![](../images/notifications.png) - -## Endpoints - -Notifications are sent to _endpoints_ via HTTP requests. Each configured -endpoint has isolated queues, retry configuration and http targets within each -instance of a registry. When an action happens within the registry, it is -converted into an event which is dropped into an in-memory queue. When the -event reaches the end of the queue, an http request is made to the endpoint -until the request succeeds. The events are sent serially to each endpoint but -order is not guaranteed. - -## Configuration - -To set up a registry instance to send notifications to endpoints, one must add -them to the configuration. A simple example follows: - - notifications: - endpoints: - - name: alistener - url: https://mylistener.example.com/event - headers: - Authorization: [Bearer ] - timeout: 500ms - threshold: 5 - backoff: 1s - -The above would configure the registry with an endpoint to send events to -`https://mylistener.example.com/event`, with the header "Authorization: Bearer -". The request would time out after 500 milliseconds. If -5 failures happen consecutively, the registry will back off for 1 second before -trying again. - -For details on the fields, please see the [configuration documentation](configuration.md#notifications).
- -A properly configured endpoint should lead to a log message from the registry -upon startup: - -``` -INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer ]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry -``` - -## Events - -Events have a well-defined JSON structure and are sent as the body of -notification requests. One or more events are sent in a structure called an -envelope. Each event has a unique id that can be used to uniquely identify incoming -requests, if required. Along with that, an _action_ is provided with a -_target_, identifying the object mutated during the event. - -The fields available in an event are described in detail in the -[godoc](http://godoc.org/github.com/docker/distribution/notifications#Event). - -**TODO:** Let's break out the fields here rather than rely on the godoc. - -The following is an example of a JSON event, sent in response to the push of a -manifest: - -```json -{ - "id": "asdf-asdf-asdf-asdf-0", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 1, - "digest": "sha256:0123456789abcdef0", - "length": 1, - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } -} -``` - -> __NOTE(stevvooe):__ As of version 2.1, the `length` field for event targets -> is being deprecated for the `size` field, bringing the target in line with -> common nomenclature. Both will continue to be set for the foreseeable -> future. Newer code should favor `size` but accept either. - -## Envelope - -The envelope contains one or more events, with the following JSON structure: - -```json -{ - "events": [ ... ] -} -``` - -While events may be sent in the same envelope, the set of events within that -envelope has no implied relationship. For example, the registry may choose to -group unrelated events and send them in the same envelope to reduce the total -number of requests. - -The full package has the mediatype -"application/vnd.docker.distribution.events.v1+json", which will be set on the -request coming to an endpoint.
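Seen from the receiving side, the envelope maps onto a small set of types. The following is a minimal Go sketch of a listener, not taken from the registry codebase: the struct covers only the fields shown in the event example above, and the `/event` path and `:8083` port are illustrative assumptions.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// event models only the fields shown in the example above; the full
// schema is documented in the notifications godoc.
type event struct {
	ID        string `json:"id"`
	Timestamp string `json:"timestamp"`
	Action    string `json:"action"`
	Target    struct {
		MediaType  string `json:"mediaType"`
		Digest     string `json:"digest"`
		Repository string `json:"repository"`
		URL        string `json:"url"`
	} `json:"target"`
}

// envelope is the request body delivered to an endpoint.
type envelope struct {
	Events []event `json:"events"`
}

func eventsHandler(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	var env envelope
	if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
		// A non-2xx/3xx status tells the registry to retry later.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for _, e := range env.Events {
		log.Printf("%s %s %s", e.Action, e.Target.Repository, e.Target.Digest)
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/event", eventsHandler)
	log.Fatal(http.ListenAndServe(":8083", nil))
}
```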
- -An example of a full envelope may look as follows: - -```json -GET /callback -Host: application/vnd.docker.distribution.events.v1+json -Authorization: Bearer -Content-Type: application/vnd.docker.distribution.events.v1+json - -{ - "events": [ - { - "id": "asdf-asdf-asdf-asdf-0", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "length": 1, - "digest": "sha256:0123456789abcdef0", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-1", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 2, - "digest": "tarsum.v2+sha256:0123456789abcdef1", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-2", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 3, - "digest": "tarsum.v2+sha256:0123456789abcdef2", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - } - ] -} -``` - -## Responses - -The registry is fairly accepting of the response codes from endpoints. If an -endpoint responds with any 2xx or 3xx response code (after following -redirects), the message will be considered delivered and discarded. - -In turn, it is recommended that endpoints are accepting of incoming responses, -as well. While the format of event envelopes is standardized by media type, -any "pickiness" about validation may cause the queue to back up on the -registry. - -## Monitoring - -The state of the endpoints is reported via the debug/vars http interface, -usually configured to "http://localhost:5001/debug/vars". Information such as -configuration and metrics is available by endpoint.
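A monitoring job can poll this interface and alert on growing queues. Below is a hedged Go sketch of such a poller; it assumes the debug server is bound to `localhost:5001` as in the sample configuration earlier, and models only the fields visible in the sample response that follows.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// endpointStatus models the per-endpoint block of the "notifications"
// expvar, using only the fields visible in the sample below.
type endpointStatus struct {
	Name    string `json:"name"`
	URL     string `json:"url"`
	Metrics struct {
		Pending   int `json:"Pending"`
		Events    int `json:"Events"`
		Successes int `json:"Successes"`
		Failures  int `json:"Failures"`
		Errors    int `json:"Errors"`
	} `json:"Metrics"`
}

func main() {
	resp, err := http.Get("http://localhost:5001/debug/vars")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var vars struct {
		Notifications struct {
			Endpoints []endpointStatus `json:"endpoints"`
		} `json:"notifications"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		log.Fatal(err)
	}
	for _, ep := range vars.Notifications.Endpoints {
		// A persistently growing Pending count is the signal to alert on.
		fmt.Printf("%s pending=%d errors=%d\n", ep.Name, ep.Metrics.Pending, ep.Metrics.Errors)
	}
}
```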
- -The following provides an example of a few endpoints that have experienced -several failures and have since recovered: - -```json -"notifications":{ - "endpoints":[ - { - "name":"local-5003", - "url":"http://localhost:5003/callback", - "Headers":{ - "Authorization":[ - "Bearer \u003can example token\u003e" - ] - }, - "Timeout":1000000000, - "Threshold":10, - "Backoff":1000000000, - "Metrics":{ - "Pending":76, - "Events":76, - "Successes":0, - "Failures":0, - "Errors":46, - "Statuses":{ - - } - } - }, - { - "name":"local-8083", - "url":"http://localhost:8083/callback", - "Headers":null, - "Timeout":1000000000, - "Threshold":10, - "Backoff":1000000000, - "Metrics":{ - "Pending":0, - "Events":76, - "Successes":76, - "Failures":0, - "Errors":28, - "Statuses":{ - "202 Accepted":76 - } - } - } - ] -} -``` - -If using notifications as part of a larger application, it is _critical_ to -monitor the size ("Pending" above) of the endpoint queues. If failures or -queue sizes are increasing, it can indicate a larger problem. - -The logs are also a valuable resource for monitoring problems. A failing -endpoint will lead to messages similar to the following: - -``` -ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying -WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off -``` - -The above indicates that several errors have led to a backoff and the registry -will wait before retrying. - -## Considerations - -Currently, the queues are in-memory, so endpoints should be _reasonably -reliable_. They are designed to make a best effort to send the messages but if -an instance is lost, messages may be dropped. If an endpoint goes down, care -should be taken to ensure that the registry instance is not terminated before -the endpoint comes back up or messages will be lost. - -This can be mitigated by running endpoints in close proximity to the registry -instances. One could run an endpoint that pages to disk and then forwards a -request to provide better durability. - -The notification system is designed around a series of interchangeable _sinks_ -which can be wired up to achieve interesting behavior. If this system doesn't -provide acceptable guarantees, adding a transactional `Sink` to the registry -is a possibility, although it may have an effect on request service time. -Please see the -[godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) -for more information. - diff --git a/vendor/github.com/docker/distribution/docs/osx-setup-guide.md b/vendor/github.com/docker/distribution/docs/osx-setup-guide.md deleted file mode 100644 index c924d457..00000000 --- a/vendor/github.com/docker/distribution/docs/osx-setup-guide.md +++ /dev/null @@ -1,62 +0,0 @@ - - -# OS X Setup Guide - -This guide will walk you through running the new Go-based [Docker registry](https://github.com/docker/distribution) on your local OS X machine.
- -## Checkout the Docker Distribution source tree - -``` -mkdir -p $GOPATH/src/github.com/docker -git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution -cd $GOPATH/src/github.com/docker/distribution -``` - -## Build the registry binary - -``` -GOPATH=$(PWD)/Godeps/_workspace:$GOPATH make binaries -sudo cp bin/registry /usr/local/libexec/registry -``` - -## Setup - -Copy the registry configuration file in place: - -``` -mkdir /Users/Shared/Registry -cp docs/osx/config.yml /Users/Shared/Registry/config.yml -``` - -## Running the Docker Registry under launchd - -Copy the Docker registry plist into place: - -``` -plutil -lint docs/osx/com.docker.registry.plist -cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/ -chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist -``` - -Start the Docker registry: - -``` -launchctl load ~/Library/LaunchAgents/com.docker.registry.plist -``` - -### Restarting the docker registry service - -``` -launchctl stop com.docker.registry -launchctl start com.docker.registry -``` - -### Unloading the docker registry service - -``` -launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist -``` diff --git a/vendor/github.com/docker/distribution/docs/osx/com.docker.registry.plist b/vendor/github.com/docker/distribution/docs/osx/com.docker.registry.plist deleted file mode 100644 index 0982349f..00000000 --- a/vendor/github.com/docker/distribution/docs/osx/com.docker.registry.plist +++ /dev/null @@ -1,42 +0,0 @@ - - - - - Label - com.docker.registry - KeepAlive - - StandardErrorPath - /Users/Shared/Registry/registry.log - StandardOutPath - /Users/Shared/Registry/registry.log - Program - /usr/local/libexec/registry - ProgramArguments - - /usr/local/libexec/registry - /Users/Shared/Registry/config.yml - - Sockets - - http-listen-address - - SockServiceName - 5000 - SockType - dgram - SockFamily - IPv4 - - http-debug-address - - SockServiceName - 5001 - SockType - dgram - SockFamily - IPv4 - - - - diff --git a/vendor/github.com/docker/distribution/docs/osx/config.yml b/vendor/github.com/docker/distribution/docs/osx/config.yml deleted file mode 100644 index 7c19e5f0..00000000 --- a/vendor/github.com/docker/distribution/docs/osx/config.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: 0.1 -log: - level: info - fields: - service: registry - environment: macbook-air -storage: - cache: - layerinfo: inmemory - filesystem: - rootdirectory: /Users/Shared/Registry -http: - addr: 0.0.0.0:5000 - secret: mytokensecret - debug: - addr: localhost:5001 diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md b/vendor/github.com/docker/distribution/docs/spec/api.md deleted file mode 100644 index 9b56b6c5..00000000 --- a/vendor/github.com/docker/distribution/docs/spec/api.md +++ /dev/null @@ -1,3450 +0,0 @@ - - -# Docker Registry HTTP API V2 - -## Introduction - -The _Docker Registry HTTP API_ is the protocol to facilitate distribution of -images to the docker engine. It interacts with instances of the docker -registry, which is a service to manage information about docker images and -enable their distribution. The specification covers the operation of version 2 -of this API, known as _Docker Registry HTTP API V2_. - -While the V1 registry protocol is usable, there are several problems with the -architecture that have led to this new version. The main driver of this -specification is a set of changes to the docker image format, covered in -docker/docker#8093.
The new, self-contained image manifest simplifies image -definition and improves security. This specification will build on that work, -leveraging new properties of the manifest format to improve performance, -reduce bandwidth usage and decrease the likelihood of backend corruption. - -For relevant details and history leading up to this specification, please see -the following issues: - -- [docker/docker#8093](https://github.com/docker/docker/issues/8093) -- [docker/docker#9015](https://github.com/docker/docker/issues/9015) -- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) - -### Scope - -This specification covers the URL layout and protocols of the interaction -between docker registry and docker core. This will affect the docker core -registry API and the rewrite of docker-registry. Docker registry -implementations may implement other API endpoints, but they are not covered by -this specification. - -This includes the following features: - -- Namespace-oriented URI Layout -- PUSH/PULL registry server for V2 image manifest format -- Resumable layer PUSH support -- V2 Client library implementation - -While authentication and authorization support will influence this -specification, details of the protocol will be left to a future specification. -Relevant header definitions and error codes are present to provide an -indication of what a client may encounter. - -#### Future - -There are features that have been discussed during the process of cutting this -specification. The following is an incomplete list: - -- Immutable image references -- Multiple architecture support -- Migration from v2compatibility representation - -These may represent features that are either out of the scope of this -specification, the purview of another specification or have been deferred to a -future version. - -### Use Cases - -For the most part, the use cases of the former registry API apply to the new -version. Differentiating use cases are covered below. - -#### Image Verification - -A docker engine instance would like to run a verified image named -"library/ubuntu", with the tag "latest". The engine contacts the registry, -requesting the manifest for "library/ubuntu:latest". An untrusted registry -returns a manifest. Before proceeding to download the individual layers, the -engine verifies the manifest's signature, ensuring that the content was -produced from a trusted source and no tampering has occurred. After each layer -is downloaded, the engine verifies the digest of the layer, ensuring that the -content matches that specified by the manifest. - -#### Resumable Push - -Company X's build servers lose connectivity to the docker registry before -completing an image layer transfer. After connectivity returns, the build -server attempts to re-upload the image. The registry notifies the build server -that the upload has already been partially attempted. The build server -responds by only sending the remaining data to complete the image file. - -#### Resumable Pull - -Company X is having more connectivity problems but this time in their -deployment datacenter. When downloading an image, the connection is -interrupted before completion. The client keeps the partial data and uses http -`Range` requests to avoid downloading repeated data. - -#### Layer Upload De-duplication - -Company Y's build system creates two identical docker layers from build -processes A and B. Build process A completes uploading the layer before B.
-When process B attempts to upload the layer, the registry indicates that it's -not necessary because the layer is already known. - -If processes A and B upload the same layer at the same time, both operations -will proceed and the first to complete will be stored in the registry (Note: -we may modify this to prevent dogpile with some locking mechanism). - -### Changes - -The V2 specification has been written to work as a living document, specifying -only what is certain and leaving what is not specified open or to future -changes. Only non-conflicting additions should be made to the API and accepted -changes should avoid preventing future changes from happening. - -This section should be updated when changes are made to the specification, -indicating what is different. Optionally, we may start marking parts of the -specification to correspond with the versions enumerated here. - -Each set of changes is given a letter corresponding to a set of modifications -that were applied to the baseline specification. These are merely for -reference and shouldn't be used outside the specification other than to -identify a set of modifications. -
-f:
-
-  - Specify the delete API for layers and manifests.
-
-e:
-
-  - Added support for listing registry contents.
-  - Added pagination to tags API.
-  - Added common approach to support pagination.
-
-d:
-
-  - Allow repository name components to be one character.
-  - Clarified that single component names are allowed.
-
-c:
-
-  - Added section covering digest format.
-  - Added more clarification that manifest cannot be deleted by tag.
-
-b:
-
-  - Added capability of doing streaming upload to PATCH blob upload.
-  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
-  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
-
-a:
-
-  - Added support for immutable manifest references in manifest endpoints.
-  - Deleting a manifest by tag has been deprecated.
-  - Specified `Docker-Content-Digest` header for appropriate entities.
-  - Added error code for unsupported operations.
- -## Overview - -This section covers client flows and details of the API endpoints. The URI -layout of the new API is structured to support a rich authentication and -authorization model by leveraging namespaces. All endpoints will be prefixed -by the API version and the repository name: - - /v2// - -For example, for an API endpoint that will work with the `library/ubuntu` -repository, the URI prefix will be: - - /v2/library/ubuntu/ - -This scheme provides rich access control over various operations and methods -using the URI prefix and http methods that can be controlled in a variety of -ways. - -Classically, repository names have always been two path components where each -path component is less than 30 characters. The V2 registry API does not -enforce this. The rules for a repository name are as follows: - -1. A repository name is broken up into _path components_. A component of a - repository name must consist of one or more lowercase alphanumeric characters, - optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. -2. If a repository name has two or more path components, they must be - separated by a forward slash ("/"). -3. The total length of a repository name, including slashes, must be less than - 256 characters. - -These name requirements _only_ apply to the registry API; implementations should -accept a superset of what is supported by other docker ecosystem components. - -All endpoints should support aggressive http caching, compression and range -headers, where appropriate. The new API attempts to leverage HTTP semantics -where possible but may break from standards to implement targeted features. - -For detail on individual endpoints, please see the [_Detail_](#detail) -section. - -### Errors - -Actionable failure conditions, covered in detail in their relevant sections, -are reported as part of 4xx responses, in a json response body. One or more -errors will be returned in the following format: - - { - "errors": [{ - "code": , - "message": , - "detail": - }, - ... - ] - } - -The `code` field will be a unique identifier, all caps with underscores by -convention. The `message` field will be a human-readable string. The optional -`detail` field may contain arbitrary json data providing information the -client can use to resolve the issue. - -While the client can take action on certain error codes, the registry may add -new error codes over time. All client implementations should treat unknown -error codes as `UNKNOWN`, allowing future error codes to be added without -breaking API compatibility. For the purposes of the specification, error codes -will only be added and never removed. - -For a complete account of all error codes, please see the _Detail_ section. - -### API Version Check - -A minimal endpoint, mounted at `/v2/`, will provide version support information -based on its response statuses. The request format is as follows: - - GET /v2/ - -If a `200 OK` response is returned, the registry implements the V2(.1) -registry API and the client may proceed safely with other V2 operations. -Optionally, the response may contain information about the supported paths in -the response body. The client should be prepared to ignore this data. - -If a `401 Unauthorized` response is returned, the client should take action -based on the contents of the "WWW-Authenticate" header and try the endpoint -again.
Depending on access control setup, the client may still have to -authenticate against different resources, even if this check succeeds. - -If a `404 Not Found` response status, or other unexpected status, is returned, -the client should proceed with the assumption that the registry does not -implement V2 of the API. - -When a `200 OK` or `401 Unauthorized` response is returned, the -"Docker-Distribution-API-Version" header should be set to "registry/2.0". -Clients may require this header value to determine if the endpoint serves this -API. When this header is omitted, clients may fall back to an older API version. - -### Content Digests - -This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). -The core of this design is the concept of a content addressable identifier. It -uniquely identifies content by taking a collision-resistant hash of the bytes. -Such an identifier can be independently calculated and verified by selection -of a common _algorithm_. If such an identifier can be communicated in a secure -manner, one can retrieve the content from an insecure source, calculate it -independently and be certain that the correct content was obtained. Put simply, -the identifier is a property of the content. - -To disambiguate from other concepts, we call this identifier a _digest_. A -_digest_ is a serialized hash result, consisting of an _algorithm_ and a _hex_ -portion. The _algorithm_ identifies the methodology used to calculate the -digest. The _hex_ portion is the hex-encoded result of the hash. - -We define a _digest_ string to match the following grammar: - - digest := algorithm ":" hex - algorithm := /[A-Za-z0-9_+.-]+/ - hex := /[A-Fa-f0-9]+/ - -Some examples of _digests_ include the following: - -digest | description | ----------------------------------------------------------------------------------|------------------------------------------------ -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | -tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests. | - -> __NOTE:__ While we show an example of using a `tarsum` digest, the security -> of tarsum has not been verified. It is recommended that most implementations -> use sha256 for interoperability. - -While the _algorithm_ does allow one to implement a wide variety of -algorithms, compliant implementations should use sha256. Heavy processing of -input before calculating a hash is discouraged to avoid degrading the -uniqueness of the _digest_ but some canonicalization may be performed to -ensure consistent identifiers. - -Let's use a simple example in pseudo-code to demonstrate a digest calculation: - -``` -let C = 'a small string' -let B = sha256(C) -let D = 'sha256:' + EncodeHex(B) -let ID(C) = D -``` - -Above, we have bytestring _C_ passed into a function, _SHA256_, that returns a -bytestring _B_, which is the hash of _C_. _D_ gets the algorithm concatenated -with the hex encoding of _B_. We then define the identifier of _C_ to _ID(C)_ -as equal to _D_. A digest can be verified by independently calculating _D_ and -comparing it with identifier _ID(C)_. - -#### Digest Header - -To provide verification of http content, any response may include a -`Docker-Content-Digest` header. This will include the digest of the target entity -returned in the response. For blobs, this is the entire blob content.
For -manifests, this is the manifest body without the signature content, also known -as the JWS payload. Note that the commonly used canonicalization for digest -calculation may be dependent on the mediatype of the content, such as with -manifests. - -The client may choose to ignore the header or may verify it to ensure content -integrity and transport security. This is most important when fetching by a -digest. To ensure security, the content should be verified against the digest -used to fetch the content. At times, the returned digest may differ from that -used to initiate a request. Such digests are considered to be from different -_domains_, meaning they have different values for _algorithm_. In such a case, -the client may choose to verify the digests in both domains or ignore the -server's digest. To maintain security, the client _must_ always verify the -content against the _digest_ used to fetch the content. - -> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use -> the same digest used to fetch the content to verify it. The header -> `Docker-Content-Digest` should not be trusted over the "local" digest. - -### Pulling An Image - -An "image" is a combination of a JSON manifest and individual layer files. The -process of pulling an image centers around retrieving these two components. - -The first step in pulling an image is to retrieve the manifest. For reference, -the relevant manifest fields for the registry are the following: - - field | description | ----------|------------------------------------------------| -name | The name of the image. | -tag | The tag for this version of the image. | -fsLayers | A list of layer descriptors (including tarsum) | -signature | A JWS used to verify the manifest content | - -For more information about the manifest format, please see -[docker/docker#8093](https://github.com/docker/docker/issues/8093). - -When the manifest is in hand, the client must verify the signature to ensure -the names and layers are valid. Once confirmed, the client will then use the -tarsums to download the individual layers. Layers are stored as blobs in -the V2 registry API, keyed by their tarsum digest. - -#### Pulling an Image Manifest - -The image manifest can be fetched with the following url: - -``` -GET /v2//manifests/ -``` - -The `name` and `reference` parameters identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful, the image -manifest will be returned, with the following format (see docker/docker#8093 -for details): - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ], - "history": , - "signature": - } - -The client should verify the returned manifest signature for authenticity -before fetching layers. - -#### Pulling a Layer - -Layers are stored in the blob portion of the registry, keyed by tarsum digest. -Pulling a layer is carried out by a standard http request. The URL is as -follows: - - GET /v2//blobs/ - -Access to a layer will be gated by the `name` of the repository but is -identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an -opaque field, to be interpreted by the tarsum library. - -This endpoint may issue a 307 (302 for /blobs/uploads/ -``` - -The parameters of this request are the image namespace under which the layer -will be linked. Responses to this request are covered below.
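To make the two-step flow concrete, here is a hedged Go sketch of the initial POST; it is not part of the specification, and the registry address and repository name in `main` are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// startUpload issues the initial POST to the uploads endpoint and returns
// the opaque upload URL from the Location header. Note that the returned
// value may be a relative URL, depending on the registry.
func startUpload(registry, name string) (string, error) {
	req, err := http.NewRequest(http.MethodPost, registry+"/v2/"+name+"/blobs/uploads/", nil)
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return "", fmt.Errorf("unexpected status starting upload: %s", resp.Status)
	}
	// Treat the upload URL as opaque; never try to assemble it by hand.
	return resp.Header.Get("Location"), nil
}

func main() {
	loc, err := startUpload("http://localhost:5000", "library/test")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("upload URL:", loc)
}
```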
- -##### Existing Layers - -The existence of a layer can be checked via a `HEAD` request to the blob store -API. The request should be formatted as follows: - -``` -HEAD /v2//blobs/ -``` - -If the layer with the tarsum specified in `digest` is available, a 200 OK -response will be received, with no actual body content (this is according to -the HTTP specification). The response will look as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - -When this response is received, the client can assume that the layer is -already available in the registry under the given name and should take no -further action to upload the layer. Note that the binary digests may differ -for the existing registry layer, but the tarsums will be guaranteed to match. - -##### Uploading the Layer - -If the POST request is successful, a `202 Accepted` response will be returned -with the upload URL in the `Location` header: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The rest of the upload process can be carried out with the returned url, -called the "Upload URL" from the `Location` header. All responses to the -upload url, whether sending data or getting status, will be in this format. -Though the URI format (`/v2//blobs/uploads/`) for the `Location` -header is specified, clients should treat it as an opaque url and should never -try to assemble it. While the `uuid` parameter may be an actual UUID, this -proposal imposes no constraints on the format and clients should never impose -any. - -If clients need to correlate local upload state with remote upload state, the -contents of the `Docker-Upload-UUID` header should be used. Such an id can be -used to key the last used location header when implementing resumable uploads. - -##### Upload Progress - -The progress and chunk coordination of the upload process will be coordinated -through the `Range` header. While this is a non-standard use of the `Range` -header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. -For an upload that has just started, for example with a 1000 byte layer file, -the `Range` header would be as follows: - -``` -Range: bytes=0-0 -``` - -To get the status of an upload, issue a GET request to the upload URL: - -``` -GET /v2//blobs/uploads/ -Host: -``` - -The response will be similar to the above, except it will return a 204 status: - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: bytes=0- -Docker-Upload-UUID: -``` - -Note that the HTTP `Range` header byte ranges are inclusive and that will be -honored, even in non-standard use cases. - -##### Monolithic Upload - -A monolithic upload is simply a chunked upload with a single chunk and may be -favored by clients that would like to avoid the complexity of chunking. To -carry out a "monolithic" upload, one can simply put the entire content blob to -the provided URL: - -``` -PUT /v2//blobs/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Type: application/octet-stream - - -``` - -The "digest" parameter must be included with the PUT request. Please see the -_Completed Upload_ section for details on the parameters and expected -responses.
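A hedged Go sketch of the monolithic flow follows; `PutMonolithic` is an illustrative helper, and the upload URL is assumed to be whatever the initial POST returned.

```go
package blobupload

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

// PutMonolithic completes an upload by PUTting the entire blob to the
// upload URL returned by the initial POST, adding the required "digest"
// query parameter without disturbing parameters already present.
func PutMonolithic(uploadURL, digest string, blob []byte) error {
	u, err := url.Parse(uploadURL)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", digest)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.ContentLength = int64(len(blob))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("upload rejected: %s", resp.Status)
	}
	return nil
}
```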
- -Additionally, the upload can be completed with a single `POST` request to -the uploads endpoint, including the "size" and "digest" parameters: - -``` -POST /v2//blobs/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Type: application/octet-stream - - -``` - -On the registry service, this should allocate an upload, accept and verify -the data and return the same response as the final chunk of an upload. If the -POST request fails collecting the data in any way, the registry should attempt -to return an error response to the client with the `Location` header providing -a place to continue the upload. - -The single `POST` method is provided for convenience and most clients should -implement `POST` + `PUT` to support reliable resume of uploads. - -##### Chunked Upload - -To carry out an upload of a chunk, the client can specify a range header and -only include that part of the layer file: - -``` -PATCH /v2//blobs/uploads/ -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -There is no enforcement on layer chunk splits other than that the server must -receive them in order. The server may enforce a minimum chunk size. If the -server cannot accept the chunk, a `416 Requested Range Not Satisfiable` -response will be returned and will include a `Range` header indicating the -current status: - -``` -416 Requested Range Not Satisfiable -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -If this response is received, the client should resume from the "last valid -range" and upload the subsequent chunk. A 416 will be returned under the -following conditions: - -- Invalid Content-Range header format -- Out of order chunk: the range of the next chunk must start immediately after - the "last valid range" from the previous response. - -When a chunk is accepted as part of the upload, a `202 Accepted` response will -be returned, including a `Range` header with the current upload status: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -##### Completed Upload - -For an upload to be considered complete, the client must submit a `PUT` -request on the upload endpoint with a digest parameter. If it is not provided, -the upload will not be considered complete. The format for the final chunk -will be as follows: - -``` -PUT /v2//blobs/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validate -the upload. Multiple "digest" parameters may be provided with different -digests. The server may verify none or all of them but _must_ notify the -client if the content is rejected. - -When the last chunk is received and the layer has been validated, the client -will receive a `201 Created` response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob, which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data.
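For the chunked flow, here is a hedged Go sketch of sending a single chunk; `PatchChunk` is an illustrative helper, and a real client would loop over chunks, carry the returned `Location` forward, and finish with the `PUT` described above.

```go
package blobupload

import (
	"bytes"
	"fmt"
	"net/http"
)

// PatchChunk sends one chunk via PATCH and returns the Location header to
// use for the next request. offset is the chunk's starting byte in the blob.
func PatchChunk(uploadURL string, offset int64, chunk []byte) (string, error) {
	req, err := http.NewRequest(http.MethodPatch, uploadURL, bytes.NewReader(chunk))
	if err != nil {
		return "", err
	}
	// Content-Range is inclusive on both ends, per the section above.
	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+int64(len(chunk))-1))
	req.Header.Set("Content-Type", "application/octet-stream")
	req.ContentLength = int64(len(chunk))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusAccepted:
		return resp.Header.Get("Location"), nil
	case http.StatusRequestedRangeNotSatisfiable:
		// The caller should resume from the "last valid range".
		return "", fmt.Errorf("out of order chunk; server range is %q", resp.Header.Get("Range"))
	default:
		return "", fmt.Errorf("unexpected status: %s", resp.Status)
	}
}
```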
-
-###### Digest Parameter
-
-The "digest" parameter is designed as an opaque parameter to support
-verification of a successful transfer. The initial version of the registry API
-will support a tarsum digest, in the standard tarsum format. For example, an
-HTTP URI parameter might be as follows:
-
-```
-tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
-```
-
-Given this parameter, the registry will verify that the provided content does
-result in this tarsum. Optionally, the registry can support other digest
-parameters for non-tarfile content stored as a layer. A regular hash digest
-might be specified as follows:
-
-```
-sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
-```
-
-Such a parameter would be used to verify the binary content (as opposed to the
-tar content) at the end of the upload process.
-
-For the initial version, registry servers are only required to support the
-tarsum format.
-
-##### Canceling an Upload
-
-An upload can be cancelled by issuing a DELETE request to the upload endpoint.
-The format will be as follows:
-
-```
-DELETE /v2/<name>/blobs/uploads/<uuid>
-```
-
-After this request is issued, the upload uuid will no longer be valid and the
-registry server will dump all intermediate data. While uploads will time out
-if not completed, clients should issue this request if they encounter a fatal
-error but still have the ability to issue an HTTP request.
-
-##### Errors
-
-If a 502, 503 or 504 error is received, the client should assume that the
-upload can proceed due to a temporary condition, honoring the appropriate
-retry mechanism. Other 5xx errors should be treated as terminal.
-
-If there is a problem with the upload, a 4xx error will be returned indicating
-the problem. After receiving a 4xx response (except 416, as called out above),
-the upload will be considered failed and the client should take appropriate
-action.
-
-Note that the upload URL will not be available forever. If the upload uuid is
-unknown to the registry, a `404 Not Found` response will be returned and the
-client must restart the upload process.
-
-### Deleting a Layer
-
-A layer may be deleted from the registry via its `name` and `digest`. A
-delete may be issued with the following request format:
-
-    DELETE /v2/<name>/blobs/<digest>
-
-If the blob exists and has been successfully deleted, the following response
-will be issued:
-
-    202 Accepted
-    Content-Length: None
-
-If the blob had already been deleted or did not exist, a `404 Not Found`
-response will be issued instead.
-
-If a layer is deleted which is referenced by a manifest in the registry,
-then the complete image will not be resolvable.
-
-#### Pushing an Image Manifest
-
-Once all of the layers for an image are uploaded, the client can upload the
-image manifest. An image can be pushed using the following request format:
-
-    PUT /v2/<name>/manifests/<reference>
-
-    {
-       "name": <name>,
-       "tag": <tag>,
-       "fsLayers": [
-          {
-             "blobSum": <tarsum>
-          },
-          ...
-       ],
-       "history": <v1 images>,
-       "signature": <JWS>,
-       ...
-    }
-
-The `name` and `reference` fields of the request body must match those
-specified in the URL. The `reference` field may be a "tag" or a "digest".
-
-If there is a problem with pushing the manifest, a relevant 4xx response will
-be returned with a JSON error message. Please see the _PUT Manifest_ section
-for details on possible error codes that may be returned.
-
-If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
-returned.
-The `detail` field of the error response will have a `digest` field
-identifying the missing blob, which will be a tarsum. An error is returned for
-each unknown blob. The response format is as follows:
-
-    {
-        "errors": [
-            {
-                "code": "BLOB_UNKNOWN",
-                "message": "blob unknown to registry",
-                "detail": {
-                    "digest": <tarsum>
-                }
-            },
-            ...
-        ]
-    }
-
-### Listing Repositories
-
-Images are stored in collections known as _repositories_, each keyed by a
-`name`, as seen throughout the API specification. A registry instance may
-contain several repositories. The list of available repositories is made
-available through the _catalog_.
-
-The catalog for a given registry can be retrieved with the following request:
-
-```
-GET /v2/_catalog
-```
-
-The response will be in the following format:
-
-```
-200 OK
-Content-Type: application/json
-
-{
-  "repositories": [
-    <name>,
-    ...
-  ]
-}
-```
-
-Note that the contents of the response are specific to the registry
-implementation. Some registries may opt to provide a full catalog output,
-limit it based on the user's access level or omit upstream results, if
-providing mirroring functionality. Subsequently, the presence of a repository
-in the catalog listing only means that the registry *may* provide access to
-the repository at the time of the request. Conversely, a missing entry does
-*not* mean that the registry does not have the repository. More succinctly,
-the catalog can confirm that a repository *may* be available, but it cannot
-confirm that a repository is absent.
-
-For registries with a large number of repositories, this response may be quite
-large. If such a response is expected, one should use pagination.
-
-#### Pagination
-
-Paginated catalog results can be retrieved by adding an `n` parameter to the
-request URL, declaring that the response should be limited to `n` results.
-Starting a paginated flow begins as follows:
-
-```
-GET /v2/_catalog?n=<integer>
-```
-
-The above specifies that a catalog response should be returned, from the start
-of the result set, ordered lexically, limiting the number of results to `n`.
-The response to such a request would look as follows:
-
-```
-200 OK
-Content-Type: application/json
-Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"
-
-{
-  "repositories": [
-    <name>,
-    ...
-  ]
-}
-```
-
-The above includes the _first_ `n` entries from the result set. To get the
-_next_ `n` entries, one can create a URL where the argument `last` has the
-value from `repositories[len(repositories)-1]`. If there are indeed more
-results, the URL for the next block is encoded in an
-[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
-relation. The presence of the `Link` header communicates to the client that
-the entire result set has not been returned and another request must be
-issued. If the header is not present, the client can assume that all results
-have been received.
-
-> __NOTE:__ In the `Link` header template above, note that the enclosing
-> angle brackets are required. For example, if the url is
-> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
-> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
-> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.
-
-Compliant client implementations should always use the `Link` header
-value when proceeding through results linearly. The client may construct URLs
-to skip forward in the catalog.
-
-To get the next result set, a client would issue the request as follows, using
-the URL encoded in the described `Link` header:
-
-```
-GET /v2/_catalog?n=<n from the request>&last=<last repository value from previous response>
-```
-
-The above process should then be repeated until the `Link` header is no longer
-set.
-
-The catalog result set is represented abstractly as a lexically sorted list,
-where the position in that list can be specified by the query term `last`. The
-entries in the response start _after_ the term specified by `last`, up to `n`
-entries.
-
-The behavior of `last` is quite simple when demonstrated with an example. Let
-us say the registry has the following repositories:
-
-```
-a
-b
-c
-d
-```
-
-If the value of `n` is 2, _a_ and _b_ will be returned on the first response.
-The `Link` header returned on the response will have `n` set to 2 and `last`
-set to _b_:
-
-```
-Link: <<url>?n=2&last=b>; rel="next"
-```
-
-The client can then issue the request with the above value from the `Link`
-header, receiving the values _c_ and _d_. Note that `n` may change on the
-second-to-last response, or be omitted entirely, if the server so chooses.
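-
-The Link-following loop above can be made concrete with a short Go sketch. It
-assumes the `Link` header carries an absolute URL, and the `registry` base URL
-and helper names are illustrative, not part of the API:
-
-```go
-package main
-
-import (
-    "encoding/json"
-    "fmt"
-    "net/http"
-    "strings"
-)
-
-type catalogPage struct {
-    Repositories []string `json:"repositories"`
-}
-
-// listRepositories walks the paginated catalog, following the RFC 5988
-// Link header until no "next" relation is returned.
-func listRepositories(registry string, n int) ([]string, error) {
-    var all []string
-    url := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, n)
-    for url != "" {
-        resp, err := http.Get(url)
-        if err != nil {
-            return nil, err
-        }
-        var page catalogPage
-        err = json.NewDecoder(resp.Body).Decode(&page)
-        resp.Body.Close()
-        if err != nil {
-            return nil, err
-        }
-        all = append(all, page.Repositories...)
-
-        // A header of the form <url>; rel="next" points at the next page;
-        // its absence means the result set is complete.
-        url = ""
-        if link := resp.Header.Get("Link"); strings.Contains(link, `rel="next"`) {
-            if start, end := strings.Index(link, "<"), strings.Index(link, ">"); start >= 0 && end > start {
-                url = link[start+1 : end]
-            }
-        }
-    }
-    return all, nil
-}
-```
-
-Note that the sketch always proceeds linearly through the `Link` values, as
-recommended above, rather than constructing `last` values itself.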
-
-### Listing Image Tags
-
-It may be necessary to list all of the tags under a given repository. The tags
-for an image repository can be retrieved with the following request:
-
-    GET /v2/<name>/tags/list
-
-The response will be in the following format:
-
-    200 OK
-    Content-Type: application/json
-
-    {
-        "name": <name>,
-        "tags": [
-            <tag>,
-            ...
-        ]
-    }
-
-For repositories with a large number of tags, this response may be quite
-large. If such a response is expected, one should use pagination.
-
-#### Pagination
-
-Paginated tag results can be retrieved by adding the appropriate parameters to
-the request URL described above. The behavior of tag pagination is identical
-to that specified for catalog pagination. We cover a simple flow to highlight
-any differences.
-
-Starting a paginated flow may begin as follows:
-
-```
-GET /v2/<name>/tags/list?n=<integer>
-```
-
-The above specifies that a tags response should be returned, from the start of
-the result set, ordered lexically, limiting the number of results to `n`. The
-response to such a request would look as follows:
-
-```
-200 OK
-Content-Type: application/json
-Link: <<url>?n=<n from the request>&last=<last tag value from previous response>>; rel="next"
-
-{
-  "name": <name>,
-  "tags": [
-    <tag>,
-    ...
-  ]
-}
-```
-
-To get the next result set, a client would issue the request as follows, using
-the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link`
-header:
-
-```
-GET /v2/<name>/tags/list?n=<n from the request>&last=<last tag value from previous response>
-```
-
-The above process should then be repeated until the `Link` header is no longer
-set in the response. The behavior of the `last` parameter, the provided
-response result, lexical ordering and encoding of the `Link` header are
-identical to that of catalog pagination.
-
-### Deleting an Image
-
-An image may be deleted from the registry via its `name` and `reference`. A
-delete may be issued with the following request format:
-
-    DELETE /v2/<name>/manifests/<reference>
-
-For deletes, `reference` *must* be a digest or the delete will fail. If the
-image exists and has been successfully deleted, the following response will be
-issued:
-
-    202 Accepted
-    Content-Length: None
-
-If the image had already been deleted or did not exist, a `404 Not Found`
-response will be issued instead.
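-
-As a concrete illustration, a delete by digest might look like the following
-Go sketch (the `registry` base URL and helper name are assumptions made for
-the example):
-
-```go
-package main
-
-import (
-    "fmt"
-    "net/http"
-)
-
-// deleteImage removes a manifest by digest. Per the specification, the
-// reference must be a digest; a tag would be rejected.
-func deleteImage(registry, name, digest string) error {
-    url := fmt.Sprintf("%s/v2/%s/manifests/%s", registry, name, digest)
-    req, err := http.NewRequest(http.MethodDelete, url, nil)
-    if err != nil {
-        return err
-    }
-    resp, err := http.DefaultClient.Do(req)
-    if err != nil {
-        return err
-    }
-    defer resp.Body.Close()
-
-    switch resp.StatusCode {
-    case http.StatusAccepted:
-        return nil // deleted
-    case http.StatusNotFound:
-        return fmt.Errorf("image already deleted or unknown")
-    default:
-        return fmt.Errorf("unexpected status: %s", resp.Status)
-    }
-}
-```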
-
-## Detail
-
-> **Note**: This section is still under construction. For the purposes of
-> implementation, if any details below differ from the described request flows
-> above, the section below should be corrected. When they match, this note
-> should be removed.
-
-The behavior of the endpoints is covered in detail in this section, organized
-by route and entity. All aspects of the requests and responses are covered,
-including headers, parameters and body formats. Examples of requests and their
-corresponding responses, with success and failure, are enumerated.
-
-> **Note**: The sections on endpoint detail are arranged with an example
-> request, a description of the request, followed by information about that
-> request.
-
-A list of methods and URIs is covered in the table below:
-
-|Method|Path|Entity|Description|
-|------|----|------|-----------|
-| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. |
-| GET | `/v2/<name>/tags/list` | Tags | Fetch the tags under the repository identified by `name`. |
-| GET | `/v2/<name>/manifests/<reference>` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
-| PUT | `/v2/<name>/manifests/<reference>` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
-| DELETE | `/v2/<name>/manifests/<reference>` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. |
-| GET | `/v2/<name>/blobs/<digest>` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
-| DELETE | `/v2/<name>/blobs/<digest>` | Blob | Delete the blob identified by `name` and `digest`. |
-| POST | `/v2/<name>/blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. |
-| GET | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. |
-| PATCH | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Upload a chunk of data for the specified upload. |
-| PUT | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. |
-| DELETE | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out. |
-| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, JSON list of repositories available in the registry. |
-
-The detail for each endpoint is covered in the following sections.
-
-### Errors
-
-The error codes encountered via the API are enumerated in the following table:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
-| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
-| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-| `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. |
-| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information on the failed validation. |
-| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
-| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. |
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
-| `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. |
-| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the URI tag, this error will be returned. |
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
-
-
-### Base
-
-Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization.
-
-#### GET Base
-
-Check that the endpoint implements Docker Registry API V2.
-
-```
-GET /v2/
-Host: <registry host>
-Authorization: <scheme> <token>
-```
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-
-###### On Success: OK
-
-```
-200 OK
-```
-
-The API implements V2 protocol and is accessible.
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-WWW-Authenticate: <scheme> realm="<realm>", ...
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client is not authorized to access the registry.
-
-The following headers will be returned on the response:
-
-|Name|Description|
-|----|-----------|
-|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.|
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-```
-
-The registry does not implement the V2 API.
-
-
-### Tags
-
-Retrieve information about tags.
-
-#### GET Tags
-
-Fetch the tags under the repository identified by `name`.
-
-##### Tags
-
-```
-GET /v2/<name>/tags/list
-Host: <registry host>
-Authorization: <scheme> <token>
-```
-
-Return all tags for the repository.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`name`|path|Name of the target repository.|
-
-###### On Success: OK
-
-```
-200 OK
-Content-Length: <length>
-Content-Type: application/json; charset=utf-8
-
-{
-    "name": <name>,
-    "tags": [
-        <tag>,
-        ...
-    ]
-}
-```
-
-A list of tags for the named repository.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Content-Length`|Length of the JSON response body.|
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The repository is not known to the registry.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
-
-###### On Failure: Unauthorized
-
-```
-401 Unauthorized
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The client does not have access to the repository.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. |
-
-##### Tags Paginated
-
-```
-GET /v2/<name>/tags/list?n=<integer>&last=<last tag value>
-```
-
-Return a portion of the tags for the specified repository.
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`name`|path|Name of the target repository.|
-|`n`|query|Limit the number of entries in each response. If not present, all entries will be returned.|
-|`last`|query|Result set will include values lexically after last.|
-
-###### On Success: OK
-
-```
-200 OK
-Content-Length: <length>
-Link: <<url>?n=<n from the request>&last=<last tag value from previous response>>; rel="next"
-Content-Type: application/json; charset=utf-8
-
-{
-    "name": <name>,
-    "tags": [
-        <tag>,
-        ...
-    ]
-}
-```
-
-A list of tags for the named repository.
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Content-Length`|Length of the JSON response body.|
-|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available|
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The repository is not known to the registry.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry.
| - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - - - -### Manifest - -Create, update, delete and retrieve manifests. - - - -#### GET Manifest - -Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. - - - -``` -GET /v2//manifests/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`reference`|path|Tag or digest of the target manifest.| - - - - -###### On Success: OK - -``` -200 OK -Docker-Content-Digest: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -} -``` - -The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The name or reference was invalid. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The named manifest is not known to the registry. 
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | - - - - -#### PUT Manifest - -Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. - - - -``` -PUT /v2//manifests/ -Host: -Authorization: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -} -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`reference`|path|Tag or digest of the target manifest.| - - - - -###### On Success: Created - -``` -201 Created -Location: -Content-Length: 0 -Docker-Content-Digest: -``` - -The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The canonical location url of the uploaded manifest.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Invalid Manifest - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | -| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | -| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have permission to push to the repository. 
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Missing Layer(s) - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -} -``` - -One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - - - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - - -#### DELETE Manifest - -Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. - - - -``` -DELETE /v2//manifests/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`reference`|path|Tag or digest of the target manifest.| - - - - -###### On Success: Accepted - -``` -202 Accepted -``` - - - - - - -###### On Failure: Invalid Name or Reference - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The specified `name` or `reference` were invalid and the delete was unable to proceed. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." 
-Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - - - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Unknown Manifest - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | - - - - - -### Blob - -Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. - - - -#### GET Blob - -Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. - - -##### Fetch Blob - -``` -GET /v2//blobs/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Docker-Content-Digest: -Content-Type: application/octet-stream - - -``` - -The blob identified by `digest` is available. The blob content will be present in the body of the request. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The length of the requested blob content.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - -###### On Success: Temporary Redirect - -``` -307 Temporary Redirect -Location: -Docker-Content-Digest: -``` - -The blob identified by `digest` is available at the provided location. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location where the layer should be accessible.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... 
- ] -} -``` - -There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The blob, identified by `name` and `digest`, is unknown to the registry. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -##### Fetch Blob Part - -``` -GET /v2//blobs/ -Host: -Authorization: -Range: bytes=- -``` - -This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Range`|header|HTTP Range header specifying blob chunk.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: Partial Content - -``` -206 Partial Content -Content-Length: -Content-Range: bytes -/ -Content-Type: application/octet-stream - - -``` - -The blob identified by `digest` is available. 
The specified chunk of blob content will be present in the body of the request. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The length of the requested blob chunk.| -|`Content-Range`|Content range of blob chunk.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Requested Range Not Satisfiable - -``` -416 Requested Range Not Satisfiable -``` - -The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content. 
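-
-For illustration, such a ranged fetch might be issued from Go as below; the
-helper name, `registry` base URL and the choice of inclusive offsets are
-assumptions made for the example:
-
-```go
-package main
-
-import (
-    "fmt"
-    "io"
-    "net/http"
-)
-
-// fetchBlobRange requests a byte range of a blob and returns the chunk.
-// start and end are inclusive offsets, matching the HTTP Range convention.
-func fetchBlobRange(registry, name, digest string, start, end int64) ([]byte, error) {
-    url := fmt.Sprintf("%s/v2/%s/blobs/%s", registry, name, digest)
-    req, err := http.NewRequest(http.MethodGet, url, nil)
-    if err != nil {
-        return nil, err
-    }
-    req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
-
-    resp, err := http.DefaultClient.Do(req)
-    if err != nil {
-        return nil, err
-    }
-    defer resp.Body.Close()
-    if resp.StatusCode != http.StatusPartialContent {
-        return nil, fmt.Errorf("expected 206 Partial Content, got %s", resp.Status)
-    }
-    return io.ReadAll(resp.Body)
-}
-```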
-
-
-#### DELETE Blob
-
-Delete the blob identified by `name` and `digest`.
-
-```
-DELETE /v2/<name>/blobs/<digest>
-Host: <registry host>
-Authorization: <scheme> <token>
-```
-
-The following parameters should be specified on the request:
-
-|Name|Kind|Description|
-|----|----|-----------|
-|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
-|`Authorization`|header|An RFC7235 compliant authorization header.|
-|`name`|path|Name of the target repository.|
-|`digest`|path|Digest of desired blob.|
-
-###### On Success: Accepted
-
-```
-202 Accepted
-Content-Length: 0
-Docker-Content-Digest: <digest>
-```
-
-The following headers will be returned with the response:
-
-|Name|Description|
-|----|-----------|
-|`Content-Length`|Zero|
-|`Docker-Content-Digest`|Digest of the targeted content for the request.|
-
-###### On Failure: Invalid Name or Digest
-
-```
-400 Bad Request
-```
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
-| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
-
-###### On Failure: Not Found
-
-```
-404 Not Found
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-The blob, identified by `name` and `digest`, is unknown to the registry.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
-| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
-
-###### On Failure: Method Not Allowed
-
-```
-405 Method Not Allowed
-Content-Type: application/json; charset=utf-8
-
-{
-    "errors": [
-        {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}
-```
-
-Delete is not enabled on the registry.
-
-The error codes that may be included in the response body are enumerated below:
-
-|Code|Message|Description|
-|----|-------|-----------|
-| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |
-
-
-### Initiate Blob Upload
-
-Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.
-
-#### POST Initiate Blob Upload
-
-Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.
-
-##### Initiate Monolithic Blob Upload
-
-```
-POST /v2/<name>/blobs/uploads/?digest=<digest>
-Host: <registry host>
-Authorization: <scheme> <token>
-Content-Length: <length of blob>
-Content-Type: application/octet-stream
-
-<binary data>
-```
-
-Upload a blob identified by the `digest` parameter in a single request.
This upload will not be resumable unless a recoverable error is returned. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|| -|`name`|path|Name of the target repository.| -|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.| - - - - -###### On Success: Created - -``` -201 Created -Location: -Content-Length: 0 -Docker-Upload-UUID: -``` - -The blob has been created in the registry and is available at the provided location. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to push to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -##### Initiate Resumable Blob Upload - -``` -POST /v2//blobs/uploads/ -Host: -Authorization: -Content-Length: 0 -``` - -Initiate a resumable blob upload with an empty request body. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| - - - - -###### On Success: Accepted - -``` -202 Accepted -Content-Length: 0 -Location: /v2//blobs/uploads/ -Range: 0-0 -Docker-Upload-UUID: -``` - -The upload has been created. 
The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to push to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - - - -### Blob Upload - -Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls. - - - -#### GET Blob Upload - -Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. - - - -``` -GET /v2//blobs/uploads/ -Host: -Authorization: -``` - -Retrieve the progress of the current upload, as reported by the `Range` header. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Upload Progress - -``` -204 No Content -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The upload is known and in progress. The last received offset is available in the `Range` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - - -#### PATCH Blob Upload - -Upload a chunk of data for the specified upload. - - -##### Stream upload - -``` -PATCH /v2//blobs/uploads/ -Host: -Authorization: -Content-Type: application/octet-stream - - -``` - -Upload a stream of data to upload without completing the upload. 
- - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Data Accepted - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to push to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. 
The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -##### Chunked upload - -``` -PATCH /v2//blobs/uploads/ -Host: -Authorization: -Content-Range: - -Content-Length: -Content-Type: application/octet-stream - - -``` - -Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.| -|`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Chunk Accepted - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." 
-Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to push to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Requested Range Not Satisfiable - -``` -416 Requested Range Not Satisfiable -``` - -The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. - - - - -#### PUT Blob Upload - -Complete the upload specified by `uuid`, optionally appending the body as the final chunk. - - - -``` -PUT /v2//blobs/uploads/?digest= -Host: -Authorization: -Content-Length: -Content-Type: application/octet-stream - - -``` - -Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| -|`digest`|query|Digest of uploaded blob.| - - - - -###### On Success: Upload Complete - -``` -204 No Content -Location: -Content-Range: - -Content-Length: 0 -Docker-Content-Digest: -``` - -The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The canonical location of the blob for retrieval| -|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. 
Note that this is a non-standard use of the `Content-Range` header.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to push to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - - -#### DELETE Blob Upload - -Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. - - - -``` -DELETE /v2//blobs/uploads/ -Host: -Authorization: -Content-Length: 0 -``` - -Cancel the upload specified by `uuid`. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Upload Deleted - -``` -204 No Content -Content-Length: 0 -``` - -The upload has been successfully deleted. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -An error was encountered processing the delete. The client may ignore this error. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Unauthorized - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -} -``` - -The client does not have access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON error response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | access to the requested resource is not authorized | The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - - - -### Catalog - -List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available. - - - -#### GET Catalog - -Retrieve a sorted, json list of repositories available in the registry. - - -##### Catalog Fetch Complete - -``` -GET /v2/_catalog -``` - -Request an unabridged list of repositories available. 
###### On Success: OK

```
200 OK
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
  "repositories": [
    <name>,
    ...
  ]
}
```

Returns the unabridged list of repositories as a json response.

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|



##### Catalog Fetch Paginated

```
GET /v2/_catalog?n=<integer>&last=<integer>
```

Return the specified portion of repositories.


The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`n`|query|Limit the number of entries in each response. If not present, all entries will be returned.|
|`last`|query|Result set will include values lexically after last.|



###### On Success: OK

```
200 OK
Content-Length: <length>
Link: <<url>?n=<last n value>&last=<last entry from response>>; rel="next"
Content-Type: application/json; charset=utf-8

{
  "repositories": [
    <name>,
    ...
  ],
  "next": "<url>?last=<name>&n=<last value of n>"
}
```

The following headers will be returned with the response:

|Name|Description|
|----|-----------|
|`Content-Length`|Length of the JSON response body.|
|`Link`|RFC5988 compliant rel="next" with URL to next result set, if available|

diff --git a/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl b/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl
deleted file mode 100644
index cc6bd7c5..00000000
--- a/vendor/github.com/docker/distribution/docs/spec/api.md.tmpl
+++ /dev/null
@@ -1,1120 +0,0 @@

# Docker Registry HTTP API V2

## Introduction

The _Docker Registry HTTP API_ is the protocol to facilitate distribution of
images to the docker engine. It interacts with instances of the docker
registry, which is a service to manage information about docker images and
enable their distribution. The specification covers the operation of version 2
of this API, known as _Docker Registry HTTP API V2_.

While the V1 registry protocol is usable, there are several problems with the
architecture that have led to this new version. The main driver of this
specification is a set of changes to the docker image format, covered in
docker/docker#8093. The new, self-contained image manifest simplifies image
definition and improves security. This specification will build on that work,
leveraging new properties of the manifest format to improve performance,
reduce bandwidth usage and decrease the likelihood of backend corruption.

For relevant details and history leading up to this specification, please see
the following issues:

- [docker/docker#8093](https://github.com/docker/docker/issues/8093)
- [docker/docker#9015](https://github.com/docker/docker/issues/9015)
- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612)

### Scope

This specification covers the URL layout and protocols of the interaction
between docker registry and docker core. This will affect the docker core
registry API and the rewrite of docker-registry. Docker registry
implementations may implement other API endpoints, but they are not covered by
this specification.

This includes the following features:

- Namespace-oriented URI Layout
- PUSH/PULL registry server for V2 image manifest format
- Resumable layer PUSH support
- V2 Client library implementation

While authentication and authorization support will influence this
specification, details of the protocol will be left to a future specification.
-Relevant header definitions and error codes are present to provide an -indication of what a client may encounter. - -#### Future - -There are features that have been discussed during the process of cutting this -specification. The following is an incomplete list: - -- Immutable image references -- Multiple architecture support -- Migration from v2compatibility representation - -These may represent features that are either out of the scope of this -specification, the purview of another specification or have been deferred to a -future version. - -### Use Cases - -For the most part, the use cases of the former registry API apply to the new -version. Differentiating use cases are covered below. - -#### Image Verification - -A docker engine instance would like to run verified image named -"library/ubuntu", with the tag "latest". The engine contacts the registry, -requesting the manifest for "library/ubuntu:latest". An untrusted registry -returns a manifest. Before proceeding to download the individual layers, the -engine verifies the manifest's signature, ensuring that the content was -produced from a trusted source and no tampering has occured. After each layer -is downloaded, the engine verifies the digest of the layer, ensuring that the -content matches that specified by the manifest. - -#### Resumable Push - -Company X's build servers lose connectivity to docker registry before -completing an image layer transfer. After connectivity returns, the build -server attempts to re-upload the image. The registry notifies the build server -that the upload has already been partially attempted. The build server -responds by only sending the remaining data to complete the image file. - -#### Resumable Pull - -Company X is having more connectivity problems but this time in their -deployment datacenter. When downloading an image, the connection is -interrupted before completion. The client keeps the partial data and uses http -`Range` requests to avoid downloading repeated data. - -#### Layer Upload De-duplication - -Company Y's build system creates two identical docker layers from build -processes A and B. Build process A completes uploading the layer before B. -When process B attempts to upload the layer, the registry indicates that its -not necessary because the layer is already known. - -If process A and B upload the same layer at the same time, both operations -will proceed and the first to complete will be stored in the registry (Note: -we may modify this to prevent dogpile with some locking mechanism). - -### Changes - -The V2 specification has been written to work as a living document, specifying -only what is certain and leaving what is not specified open or to future -changes. Only non-conflicting additions should be made to the API and accepted -changes should avoid preventing future changes from happening. - -This section should be updated when changes are made to the specification, -indicating what is different. Optionally, we may start marking parts of the -specification to correspond with the versions enumerated here. - -Each set of changes is given a letter corresponding to a set of modifications -that were applied to the baseline specification. These are merely for -reference and shouldn't be used outside the specification other than to -identify a set of modifications. - -
**f**

- Specify the delete API for layers and manifests.

**e**

- Added support for listing registry contents.
- Added pagination to tags API.
- Added common approach to support pagination.

**d**

- Allow repository name components to be one character.
- Clarified that single component names are allowed.

**c**

- Added section covering digest format.
- Added more clarification that manifest cannot be deleted by tag.

**b**

- Added capability of doing streaming upload to PATCH blob upload.
- Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
- Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.

**a**

- Added support for immutable manifest references in manifest endpoints.
- Deleting a manifest by tag has been deprecated.
- Specified `Docker-Content-Digest` header for appropriate entities.
- Added error code for unsupported operations.
## Overview

This section covers client flows and details of the API endpoints. The URI
layout of the new API is structured to support a rich authentication and
authorization model by leveraging namespaces. All endpoints will be prefixed
by the API version and the repository name:

    /v2/<name>/

For example, for an API endpoint that will work with the `library/ubuntu`
repository, the URI prefix will be:

    /v2/library/ubuntu/

This scheme provides rich access control over various operations and methods
using the URI prefix and http methods that can be controlled in a variety of
ways.

Classically, repository names have always been two path components where each
path component is less than 30 characters. The V2 registry API does not
enforce this. The rules for a repository name are as follows:

1. A repository name is broken up into _path components_. A component of a
   repository name must consist of at least one lowercase alpha-numeric
   character, optionally separated by periods, dashes or underscores. More
   strictly, it must match the regular expression
   `[a-z0-9]+(?:[._-][a-z0-9]+)*`.
2. If a repository name has two or more path components, they must be
   separated by a forward slash ("/").
3. The total length of a repository name, including slashes, must be less
   than 256 characters.

These name requirements _only_ apply to the registry API and should accept a
superset of what is supported by other docker ecosystem components.

All endpoints should support aggressive http caching, compression and range
headers, where appropriate. The new API attempts to leverage HTTP semantics
where possible but may break from standards to implement targeted features.

For detail on individual endpoints, please see the [_Detail_](#detail)
section.

### Errors

Actionable failure conditions, covered in detail in their relevant sections,
are reported as part of 4xx responses, in a json response body. One or more
errors will be returned in the following format:

    {
        "errors": [{
                "code": <error identifier>,
                "message": <message describing condition>,
                "detail": <unstructured>
            },
            ...
        ]
    }

The `code` field will be a unique identifier, all caps with underscores by
convention. The `message` field will be a human readable string. The optional
`detail` field may contain arbitrary json data providing information the
client can use to resolve the issue.

While the client can take action on certain error codes, the registry may add
new error codes over time. All client implementations should treat unknown
error codes as `UNKNOWN`, allowing future error codes to be added without
breaking API compatibility. For the purposes of the specification, error codes
will only be added and never removed.

For a complete account of all error codes, please see the _Detail_ section.

### API Version Check

A minimal endpoint, mounted at `/v2/`, will provide version support
information based on its response statuses. The request format is as follows:

    GET /v2/

If a `200 OK` response is returned, the registry implements the V2(.1)
registry API and the client may proceed safely with other V2 operations.
Optionally, the response may contain information about the supported paths in
the response body. The client should be prepared to ignore this data.

If a `401 Unauthorized` response is returned, the client should take action
based on the contents of the "WWW-Authenticate" header and try the endpoint
again.
Depending on access control setup, the client may still have to -authenticate against different resources, even if this check succeeds. - -If `404 Not Found` response status, or other unexpected status, is returned, -the client should proceed with the assumption that the registry does not -implement V2 of the API. - -When a `200 OK` or `401 Unauthorized` response is returned, the -"Docker-Distribution-API-Version" header should be set to "registry/2.0". -Clients may require this header value to determine if the endpoint serves this -API. When this header is omitted, clients may fallback to an older API version. - -### Content Digests - -This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). -The core of this design is the concept of a content addressable identifier. It -uniquely identifies content by taking a collision-resistant hash of the bytes. -Such an identifier can be independently calculated and verified by selection -of a common _algorithm_. If such an identifier can be communicated in a secure -manner, one can retrieve the content from an insecure source, calculate it -independently and be certain that the correct content was obtained. Put simply, -the identifier is a property of the content. - -To disambiguate from other concepts, we call this identifier a _digest_. A -_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ -portion. The _algorithm_ identifies the methodology used to calculate the -digest. The _hex_ portion is the hex-encoded result of the hash. - -We define a _digest_ string to match the following grammar: - - digest := algorithm ":" hex - algorithm := /[A-Fa-f0-9_+.-]+/ - hex := /[A-Fa-f0-9]+/ - -Some examples of _digests_ include the following: - -digest | description | -----------------------------------------------------------------------------------|------------------------------------------------ -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | -tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests. | - -> __NOTE:__ While we show an example of using a `tarsum` digest, the security -> of tarsum has not been verified. It is recommended that most implementations -> use sha256 for interoperability. - -While the _algorithm_ does allow one to implement a wide variety of -algorithms, compliant implementations should use sha256. Heavy processing of -input before calculating a hash is discouraged to avoid degrading the -uniqueness of the _digest_ but some canonicalization may be performed to -ensure consistent identifiers. - -Let's use a simple example in pseudo-code to demonstrate a digest calculation: - -``` -let C = 'a small string' -let B = sha256(C) -let D = 'sha256:' + EncodeHex(B) -let ID(C) = D -``` - -Above, we have bytestring _C_ passed into a function, _SHA256_, that returns a -bytestring B, which is the hash of _C_. _D_ gets the algorithm concatenated -with the hex encoding of _B_. We then define the identifier of _C_ to _ID(C)_ -as equal to _D_. A digest can be verified by independently calculating _D_ and -comparing it with identifier _ID(C)_ - -#### Digest Header - -To provide verification of http content, any response may include a `Docker- -Content-Digest` header. This will include the digest of the target entity -returned in the response. For blobs, this is the entire blob content. 
For -manifests, this is the manifest body without the signature content, also known -as the JWS payload. Note that the commonly used canonicalization for digest -calculation may be dependent on the mediatype of the content, such as with -manifests. - -The client may choose to ignore the header or may verify it to ensure content -integrity and transport security. This is most important when fetching by a -digest. To ensure security, the content should be verified against the digest -used to fetch the content. At times, the returned digest may differ from that -used to initiate a request. Such digests are considered to be from different -_domains_, meaning they have different values for _algorithm_. In such a case, -the client may choose to verify the digests in both domains or ignore the -server's digest. To maintain security, the client _must_ always verify the -content against the _digest_ used to fetch the content. - -> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use -> the same digest used to fetch the content to verify it. The header `Docker- -> Content-Digest` should not be trusted over the "local" digest. - -### Pulling An Image - -An "image" is a combination of a JSON manifest and individual layer files. The -process of pulling an image centers around retrieving these two components. - -The first step in pulling an image is to retrieve the manifest. For reference, -the relevant manifest fields for the registry are the following: - - field | description | -----------|------------------------------------------------| -name | The name of the image. | -tag | The tag for this version of the image. | -fsLayers | A list of layer descriptors (including tarsum) | -signature | A JWS used to verify the manifest content | - -For more information about the manifest format, please see -[docker/docker#8093](https://github.com/docker/docker/issues/8093). - -When the manifest is in hand, the client must verify the signature to ensure -the names and layers are valid. Once confirmed, the client will then use the -tarsums to download the individual layers. Layers are stored in as blobs in -the V2 registry API, keyed by their tarsum digest. - -#### Pulling an Image Manifest - -The image manifest can be fetched with the following url: - -``` -GET /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful, the image -manifest will be returned, with the following format (see docker/docker#8093 -for details): - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": - } - -The client should verify the returned manifest signature for authenticity -before fetching layers. - -#### Pulling a Layer - -Layers are stored in the blob portion of the registry, keyed by tarsum digest. -Pulling a layer is carried out by a standard http request. The URL is as -follows: - - GET /v2//blobs/ - -Access to a layer will be gated by the `name` of the repository but is -identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an -opaque field, to be interpreted by the tarsum library. - -This endpoint may issue a 307 (302 for /blobs/uploads/ -``` - -The parameters of this request are the image namespace under which the layer -will be linked. Responses to this request are covered below. 
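To make the two-step flow concrete, the following Go sketch performs the
first step: it issues the `POST` that starts an upload and captures the
upload URL and upload UUID from the response headers. This is a minimal
illustration, not part of the specification; the `startUpload` name and its
registry/name parameters are assumptions for the example.

```
package main

import (
	"fmt"
	"net/http"
)

// startUpload begins a resumable blob upload and returns the opaque upload
// URL from the Location header, plus the Docker-Upload-UUID that can be used
// to correlate local and remote upload state.
func startUpload(registry, name string) (uploadURL, uploadUUID string, err error) {
	resp, err := http.Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", registry, name), "", nil)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusAccepted {
		return "", "", fmt.Errorf("unexpected status starting upload: %s", resp.Status)
	}
	// Clients must treat the Location value as opaque and use it verbatim.
	return resp.Header.Get("Location"), resp.Header.Get("Docker-Upload-UUID"), nil
}
```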
##### Existing Layers

The existence of a layer can be checked via a `HEAD` request to the blob store
API. The request should be formatted as follows:

```
HEAD /v2/<name>/blobs/<digest>
```

If the layer with the tarsum specified in `digest` is available, a 200 OK
response will be received, with no actual body content (this is according to
http specification). The response will look as follows:

```
200 OK
Content-Length: <length of blob>
Docker-Content-Digest: <digest>
```

When this response is received, the client can assume that the layer is
already available in the registry under the given name and should take no
further action to upload the layer. Note that the binary digests may differ
for the existing registry layer, but the tarsums will be guaranteed to match.

##### Uploading the Layer

If the POST request is successful, a `202 Accepted` response will be returned
with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The rest of the upload process can be carried out with the returned url,
called the "Upload URL" from the `Location` header. All responses to the
upload url, whether sending data or getting status, will be in this format.
Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location`
header is specified, clients should treat it as an opaque url and should never
try to assemble it. While the `uuid` parameter may be an actual UUID, this
proposal imposes no constraints on the format and clients should never impose
any.

If clients need to correlate local upload state with remote upload state, the
contents of the `Docker-Upload-UUID` header should be used. Such an id can be
used to key the last used location header when implementing resumable uploads.

##### Upload Progress

The progress and chunk coordination of the upload process will be coordinated
through the `Range` header. While this is a non-standard use of the `Range`
header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use.
For an upload that has just started, taking an example with a 1000 byte layer
file, the `Range` header would be as follows:

```
Range: bytes=0-0
```

To get the status of an upload, issue a GET request to the upload URL:

```
GET /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
```

The response will be similar to the above, except it will return a 204 status:

```
204 No Content
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Docker-Upload-UUID: <uuid>
```

Note that the HTTP `Range` header byte ranges are inclusive and that will be
honored, even in non-standard use cases.

##### Monolithic Upload

A monolithic upload is simply a chunked upload with a single chunk and may be
favored by clients that would like to avoid the complexity of chunking. To
carry out a "monolithic" upload, one can simply put the entire content blob to
the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

The "digest" parameter must be included with the PUT request. Please see the
_Completed Upload_ section for details on the parameters and expected
responses.
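A minimal Go sketch of such a monolithic upload follows. It assumes the
upload URL returned by the initial `POST` is absolute and that a single
`digest` parameter suffices; both are simplifications, and `monolithicUpload`
is an illustrative name rather than anything defined by this specification.

```
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

// monolithicUpload completes an upload in one PUT, sending the whole blob to
// the opaque upload URL and attaching the required digest parameter.
func monolithicUpload(uploadURL, digest string, blob []byte) error {
	u, err := url.Parse(uploadURL)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", digest) // e.g. "sha256:<hex>"; required to complete the upload
	u.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// The overview describes a 201 Created while the endpoint detail uses
	// 204 No Content, so accept either as success.
	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("upload not accepted: %s", resp.Status)
	}
	return nil
}
```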
Additionally, the upload can be completed with a single `POST` request to
the uploads endpoint, including the "size" and "digest" parameters:

```
POST /v2/<name>/blobs/uploads/?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of layer>
Content-Type: application/octet-stream

<Layer Binary Data>
```

On the registry service, this should allocate an upload, accept and verify
the data and return the same response as the final chunk of an upload. If the
POST request fails collecting the data in any way, the registry should attempt
to return an error response to the client with the `Location` header providing
a place to continue the upload.

The single `POST` method is provided for convenience and most clients should
implement `POST` + `PUT` to support reliable resume of uploads.

##### Chunked Upload

To carry out an upload of a chunk, the client can specify a range header and
only include that part of the layer file:

```
PATCH /v2/<name>/blobs/uploads/<uuid>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Layer Chunk Binary Data>
```

There is no enforcement on layer chunk splits other than that the server must
receive them in order. The server may enforce a minimum chunk size. If the
server cannot accept the chunk, a `416 Requested Range Not Satisfiable`
response will be returned and will include a `Range` header indicating the
current status:

```
416 Requested Range Not Satisfiable
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<last valid range>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

If this response is received, the client should resume from the "last valid
range" and upload the subsequent chunk. A 416 will be returned under the
following conditions:

- Invalid Content-Range header format
- Out of order chunk: the range of the next chunk must start immediately after
  the "last valid range" from the previous response.

When a chunk is accepted as part of the upload, a `202 Accepted` response will
be returned, including a `Range` header with the current upload status:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

##### Completed Upload

For an upload to be considered complete, the client must submit a `PUT`
request on the upload endpoint with a digest parameter. If it is not provided,
the upload will not be considered complete. The format for the final chunk
will be as follows:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<tarsum>[&digest=sha256:<hex digest>]
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream

<Last Layer Chunk Binary Data>
```

Optionally, if all chunks have already been uploaded, a `PUT` request with a
`digest` parameter and zero-length body may be sent to complete and validate
the upload. Multiple "digest" parameters may be provided with different
digests. The server may verify none or all of them but _must_ notify the
client if the content is rejected.

When the last chunk is received and the layer has been validated, the client
will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<tarsum>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted
layer file. The `Docker-Content-Digest` header returns the canonical digest of
the uploaded blob which may differ from the provided digest. Most clients may
ignore the value but if it is used, the client should verify the value against
the uploaded blob data.
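Because the canonical digest may differ from the digest the client provided,
a client that wants to check `Docker-Content-Digest` can recompute the digest
locally. The Go sketch below handles sha256-based digests only; tarsum
digests are computed by the tarsum library and cannot be verified this way.
The function names are illustrative assumptions.

```
package main

import (
	"crypto/sha256"
	"fmt"
)

// digestOf computes a digest string in the algorithm:hex format described
// earlier, using sha256, the algorithm recommended for interoperability.
func digestOf(content []byte) string {
	return fmt.Sprintf("sha256:%x", sha256.Sum256(content))
}

// verifyCanonicalDigest checks a returned Docker-Content-Digest against the
// blob data that was uploaded. It only applies when the returned digest is
// in the sha256 domain.
func verifyCanonicalDigest(returned string, blob []byte) error {
	if computed := digestOf(blob); computed != returned {
		return fmt.Errorf("digest mismatch: registry returned %s, computed %s", returned, computed)
	}
	return nil
}
```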
###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support
verification of a successful transfer. The initial version of the registry API
will support a tarsum digest, in the standard tarsum format. For example, a
HTTP URI parameter might be as follows:

```
tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does
result in this tarsum. Optionally, the registry can support other digest
parameters for non-tarfile content stored as a layer. A regular hash digest
might be specified as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Such a parameter would be used to verify the binary content (as opposed to
the tar content) at the end of the upload process.

For the initial version, registry servers are only required to support the
tarsum format.

##### Canceling an Upload

An upload can be cancelled by issuing a DELETE request to the upload endpoint.
The format will be as follows:

```
DELETE /v2/<name>/blobs/uploads/<uuid>
```

After this request is issued, the upload uuid will no longer be valid and the
registry server will dump all intermediate data. While uploads will time out
if not completed, clients should issue this request if they encounter a fatal
error but still have the ability to issue an http request.

##### Errors

If a 502, 503 or 504 error is received, the client should assume that the
failure is due to a temporary condition and that the upload can proceed once
it clears, honoring the appropriate retry mechanism. Other 5xx errors should
be treated as terminal.

If there is a problem with the upload, a 4xx error will be returned indicating
the problem. After receiving a 4xx response (except 416, as called out above),
the upload will be considered failed and the client should take appropriate
action.

Note that the upload url will not be available forever. If the upload uuid is
unknown to the registry, a `404 Not Found` response will be returned and the
client must restart the upload process.

### Deleting a Layer

A layer may be deleted from the registry via its `name` and `digest`. A
delete may be issued with the following request format:

    DELETE /v2/<name>/blobs/<digest>

If the blob exists and has been successfully deleted, the following response
will be issued:

    202 Accepted
    Content-Length: None

If the blob had already been deleted or did not exist, a `404 Not Found`
response will be issued instead.

If a layer is deleted which is referenced by a manifest in the registry, then
the complete image will not be resolvable.

#### Pushing an Image Manifest

Once all of the layers for an image are uploaded, the client can upload the
image manifest. An image can be pushed using the following request format:

    PUT /v2/<name>/manifests/<reference>

    {
        "name": <name>,
        "tag": <tag>,
        "fsLayers": [
            {
                "blobSum": <tarsum>
            },
            ...
        ],
        "history": <v1 images>,
        "signature": <JWS>,
        ...
    }

The `name` and `reference` fields of the response body must match those
specified in the URL. The `reference` field may be a "tag" or a "digest".

If there is a problem with pushing the manifest, a relevant 4xx response will
be returned with a JSON error message. Please see the _PUT Manifest_ section
for details on possible error codes that may be returned.

If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are
returned.
The `detail` field of the error response will have a `digest` field
identifying the missing blob, which will be a tarsum. An error is returned for
each unknown blob. The response format is as follows:

    {
        "errors": [{
                "code": "BLOB_UNKNOWN",
                "message": "blob unknown to registry",
                "detail": {
                    "digest": <tarsum>
                }
            },
            ...
        ]
    }

### Listing Repositories

Images are stored in collections known as _repositories_, each keyed by a
`name`, as seen throughout the API specification. A registry instance may
contain several repositories. The list of available repositories is made
available through the _catalog_.

The catalog for a given registry can be retrieved with the following request:

```
GET /v2/_catalog
```

The response will be in the following format:

```
200 OK
Content-Type: application/json

{
  "repositories": [
    <name>,
    ...
  ]
}
```

Note that the contents of the response are specific to the registry
implementation. Some registries may opt to provide a full catalog output,
limit it based on the user's access level or omit upstream results, if
providing mirroring functionality. Subsequently, the presence of a repository
in the catalog listing only means that the registry *may* provide access to
the repository at the time of the request. Conversely, a missing entry does
*not* mean that the registry does not have the repository. More succinctly,
presence in the catalog guarantees that the repository is there, while
absence guarantees nothing.

For registries with a large number of repositories, this response may be quite
large. If such a response is expected, one should use pagination.

#### Pagination

Paginated catalog results can be retrieved by adding an `n` parameter to the
request URL, declaring that the response should be limited to `n` results.
Starting a paginated flow begins as follows:

```
GET /v2/_catalog?n=<integer>
```

The above specifies that a catalog response should be returned, from the start
of the result set, ordered lexically, limiting the number of results to `n`.
The response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"

{
  "repositories": [
    <name>,
    ...
  ]
}
```

The above includes the _first_ `n` entries from the result set. To get the
_next_ `n` entries, one can create a URL where the argument `last` has the
value from `repositories[len(repositories)-1]`. If there are indeed more
results, the URL for the next block is encoded in an
[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next"
relation. The presence of the `Link` header communicates to the client that
the entire result set has not been returned and another request must be
issued. If the header is not present, the client can assume that all results
have been received.

> __NOTE:__ In the request template above, note that the brackets
> are required. For example, if the url is
> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.

Compliant client implementations should always use the `Link` header
value when proceeding through results linearly. The client may construct URLs
to skip forward in the catalog.
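A minimal Go sketch of proceeding through results linearly is shown below.
The `Link` parsing is deliberately naive: it assumes a single absolute URL
with `rel="next"`, whereas a robust client would use a full RFC5988 parser
and resolve relative URLs against the registry host. The `listAll` name is
an assumption for the example.

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

type catalog struct {
	Repositories []string `json:"repositories"`
}

// listAll walks the paginated catalog, following the Link header until the
// registry stops returning one.
func listAll(registry string, n int) ([]string, error) {
	var all []string
	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, n)
	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close()
			return nil, fmt.Errorf("unexpected status: %s", resp.Status)
		}
		var page catalog
		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
			resp.Body.Close()
			return nil, err
		}
		resp.Body.Close()
		all = append(all, page.Repositories...)

		// No Link header (or no rel="next") means the result set is complete.
		next = ""
		if link := resp.Header.Get("Link"); strings.Contains(link, `rel="next"`) {
			next = strings.Trim(strings.SplitN(link, ";", 2)[0], " <>")
		}
	}
	return all, nil
}
```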
- -To get the next result set, a client would issue the request as follows, using -the URL encoded in the described `Link` header: - -``` -GET /v2/_catalog?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set. - -The catalog result set is represented abstractly as a lexically sorted list, -where the position in that list can be specified by the query term `last`. The -entries in the response start _after_ the term specified by `last`, up to `n` -entries. - -The behavior of `last` is quite simple when demonstrated with an example. Let -us say the registry has the following repositories: - -``` -a -b -c -d -``` - -If the value of `n` is 2, _a_ and _b_ will be returned on the first response. -The `Link` header returned on the response will have `n` set to 2 and last set -to _b_: - -``` -Link: <?n=2&last=b>; rel="next" -``` - -The client can then issue the request with above value from the `Link` header, -receiving the values _c_ and _d_. Note that n may change on second to last -response or be omitted fully, if the server may so choose. - -### Listing Image Tags - -It may be necessary to list all of the tags under a given repository. The tags -for an image repository can be retrieved with the following request: - - GET /v2//tags/list - -The response will be in the following format: - - 200 OK - Content-Type: application/json - - { - "name": , - "tags": [ - , - ... - ] - } - -For repositories with a large number of tags, this response may be quite -large. If such a response is expected, one should use the pagination. - -#### Pagination - -Paginated tag results can be retrieved by adding the appropriate parameters to -the request URL described above. The behavior of tag pagination is identical -to that specified for catalog pagination. We cover a simple flow to highlight -any differences. - -Starting a paginated flow may begin as follows: - -``` -GET /v2//tags/list?n= -``` - -The above specifies that a tags response should be returned, from the start of -the result set, ordered lexically, limiting the number of results to `n`. The -response to such a request would look as follows: - -``` -200 OK -Content-Type: application/json -Link: <?n=&last=>; rel="next" - -{ - "name": , - "tags": [ - , - ... - ] -} -``` - -To get the next result set, a client would issue the request as follows, using -the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` -header: - -``` -GET /v2//tags/list?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set in the response. The behavior of the `last` parameter, the provided -response result, lexical ordering and encoding of the `Link` header are -identical to that of catalog pagination. - -### Deleting an Image - -An image may be deleted from the registry via its `name` and `reference`. A -delete may be issued with the following request format: - - DELETE /v2//manifests/ - -For deletes, `reference` *must* be a digest or the delete will fail. If the -image exists and has been successfully deleted, the following response will be -issued: - - 202 Accepted - Content-Length: None - -If the image had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -## Detail - -> **Note**: This section is still under construction. For the purposes of -> implementation, if any details below differ from the described request flows -> above, the section below should be corrected. When they match, this note -> should be removed. 
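A minimal Go sketch of such a delete follows; per the text above, the
reference must be a digest. The `deleteManifest` name and status handling are
illustrative assumptions.

```
package main

import (
	"fmt"
	"net/http"
)

// deleteManifest deletes an image manifest by digest. A 202 indicates
// success; a 404 means the image was already deleted or never existed.
func deleteManifest(registry, name, digest string) error {
	req, err := http.NewRequest(http.MethodDelete,
		fmt.Sprintf("%s/v2/%s/manifests/%s", registry, name, digest), nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusAccepted:
		return nil
	case http.StatusNotFound:
		return fmt.Errorf("manifest already deleted or unknown")
	default:
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
}
```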
- -The behavior of the endpoints are covered in detail in this section, organized -by route and entity. All aspects of the request and responses are covered, -including headers, parameters and body formats. Examples of requests and their -corresponding responses, with success and failure, are enumerated. - -> **Note**: The sections on endpoint detail are arranged with an example -> request, a description of the request, followed by information about that -> request. - -A list of methods and URIs are covered in the table below: - -|Method|Path|Entity|Description| -|------|----|------|-----------| -{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} | -{{end}}{{end}} - -The detail for each endpoint is covered in the following sections. - -### Errors - -The error codes encountered via the API are enumerated in the following table: - -|Code|Message|Description| -|----|-------|-----------| -{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} -{{end}} - -{{range $route := .RouteDescriptors}} -### {{.Entity}} - -{{.Description}} - -{{range $method := $route.Methods}} - -#### {{.Method}} {{$route.Entity}} - -{{.Description}} - -{{if .Requests}}{{range .Requests}}{{if .Name}} -##### {{.Name}}{{end}} - -``` -{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} - -{{if or .Headers .PathParameters .QueryParameters}} -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| -{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| -{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| -{{end}}{{end}} - -{{if .Successes}} -{{range .Successes}} -###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Fields}}The following fields may be returned in the response body: - -|Name|Description| -|----|-----------| -{{range .Fields}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{if .Headers}} -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{end}}{{end}} - -{{if .Failures}} -{{range .Failures}} -###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Headers}} -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}} - -{{if .ErrorCodes}} -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| 
|----|-------|-----------|
{{range $err := .ErrorCodes}}| `{{$err}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} |
{{end}}

{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}

{{end}}
diff --git a/vendor/github.com/docker/distribution/docs/spec/auth/token.md b/vendor/github.com/docker/distribution/docs/spec/auth/token.md
deleted file mode 100644
index a2da9483..00000000
--- a/vendor/github.com/docker/distribution/docs/spec/auth/token.md
+++ /dev/null
@@ -1,425 +0,0 @@

# Docker Registry v2 authentication via central service

Today a Docker Registry can run in standalone mode in which there are no
authorization checks. While adding your own HTTP authorization requirements in
a proxy placed between the client and the registry can give you greater access
control, we'd like a native authorization mechanism that's public key based,
with access control lists managed separately, and with fine-grained access
control on a by-key, by-user, by-namespace, and by-repository basis. In v1
this can be configured by specifying an `index_endpoint` in the registry's
config. Clients present tokens generated by the index and tokens are validated
on-line by the registry with every request. This results in a complex
authentication and authorization loop that occurs with every registry
operation. Some people are very familiar with this image:

![index auth](https://docs.docker.com/static_files/docker_pull_chart.png)

The above image outlines the 6-step process in accessing the Official Docker
Registry.

1. Contact the Docker Hub to find out where I should download "samalba/busybox".
2. Docker Hub replies:
   a. samalba/busybox is on Registry A
   b. here are the checksums for samalba/busybox (for all layers)
   c. token
3. Contact Registry A to receive the layers for samalba/busybox (all of them to
   the base image). Registry A is authoritative for "samalba/busybox" but keeps
   a copy of all inherited layers and serves them all from the same location.
4. Registry contacts Docker Hub to verify if token/user is allowed to download
   images.
5. Docker Hub returns true/false, letting the registry know if it should
   proceed or error out.
6. Get the payload for all layers.

The goal of this document is to outline a way to eliminate steps 4 and 5 from
the above process by using cryptographically signed tokens and no longer
requiring the client to authenticate each request with a username and password
stored locally in plain text.

The new registry workflow is more like this:

![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360)

1. Attempt to begin a push/pull operation with the registry.
2. If the registry requires authorization it will return a `401 Unauthorized`
   HTTP response with information on how to authenticate.
3. The registry client makes a request to the authorization service for a
   signed JSON Web Token.
4. The authorization service returns a token.
5. The client retries the original request with the token embedded in the
   request header.
6. The Registry authorizes the client and begins the push/pull session as
   usual.

## Requirements

- Registry Clients capable of generating key pairs which can be used to
  authenticate to an authorization server.
- An authorization server capable of managing user accounts, their public keys,
  and access controls to their resources hosted by any given service (such as
  repositories in a Docker Registry).
- A Docker Registry capable of trusting the authorization server to sign tokens
  which clients can use for authorization, and of verifying these tokens for
  single use or for use during a sufficiently short period of time.

## Authorization Server Endpoint Descriptions

This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).

The described server is meant to serve as a user account and key manager and a
centralized access control list for resources hosted by other services which
wish to authenticate and manage authorizations using this service's accounts
and their public keys.

Such a service could be used by the official docker registry to authenticate
clients and verify their authorization to docker image repositories.

Docker will need to be updated to interact with an authorization server to get
an authorization token.

## How to authenticate

Today, registry clients first contact the index to initiate a push or pull.
For v2, clients should contact the registry first. If the registry server
requires authentication it will return a `401 Unauthorized` response with a
`WWW-Authenticate` header detailing how to authenticate to this registry.

For example, say I (username `jlhawn`) am attempting to push an image to the
repository `samalba/my-app`. For the registry to authorize this, I either need
`push` access to the `samalba/my-app` repository or `push` access to the whole
`samalba` namespace in general. The registry will first return this response:

```
HTTP/1.1 401 Unauthorized
WWW-Authenticate: Bearer realm="https://auth.docker.com/v2/token/",service="registry.docker.com",scope="repository:samalba/my-app:push"
```

This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3).

The client will then know to make a `GET` request to the URL
`https://auth.docker.com/v2/token/` using the `service` and `scope` values from
the `WWW-Authenticate` header.

## Requesting a Token

#### Query Parameters
-
-- `service`: The name of the service which hosts the resource.
-- `scope`: The resource in question, formatted as one of the space-delimited
-  entries from the `scope` parameters from the `WWW-Authenticate` header shown
-  above. This query parameter should be specified multiple times if there is
-  more than one `scope` entry from the `WWW-Authenticate` header. The above
-  example would be specified as: `scope=repository:samalba/my-app:push`.
-- `account`: The name of the account which the client is acting as. Optional
-  if it can be inferred from client authentication.
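As an illustration of these parameters, the following Go sketch composes the token request URL used in the example below. It is not part of the original specification; the host, path, and values are taken from that example.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Query parameters as described above; scope may repeat.
	v := url.Values{}
	v.Set("service", "registry.docker.com")
	v.Add("scope", "repository:samalba/my-app:push")
	v.Set("account", "jlhawn")

	u := url.URL{
		Scheme:   "https",
		Host:     "auth.docker.com",
		Path:     "/v2/token/",
		RawQuery: v.Encode(),
	}
	fmt.Println(u.String())
}
```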
-
-#### Description
-
-Requests an authorization token for access to a specific resource hosted by a
-specific service provider. Requires the client to authenticate either using a
-TLS client certificate or using basic authentication (or any other kind of
-digest/challenge/response authentication scheme if the client doesn't support
-TLS client certs). If the key in the client certificate is linked to an account
-then the token is issued for that account key. If the key in the certificate is
-linked to multiple accounts then the client must specify the `account` query
-parameter. The returned token is in JWT (JSON Web Token) format, signed using
-the authorization server's private key.
-
-#### Example
-
-For this example, the client makes an HTTP request to the following endpoint
-over TLS using a client certificate with the server being configured to allow a
-non-verified issuer during the handshake (i.e., a self-signed client cert is
-okay).
-
-```
-GET /v2/token/?service=registry.docker.com&scope=repository:samalba/my-app:push&account=jlhawn HTTP/1.1
-Host: auth.docker.com
-```
-
-The server first inspects the client certificate to extract the subject key and
-look up which account it is associated with. The client is now authenticated
-using that account.
-
-The server next searches its access control list for the account's access to
-the repository `samalba/my-app` hosted by the service `registry.docker.com`.
-
-The server will now construct a JSON Web Token to sign and return. A JSON Web
-Token has 3 main parts:
-
-1. Headers
-
-   The header of a JSON Web Token is a standard JOSE header. The "typ" field
-   will be "JWT" and it will also contain the "alg" which identifies the
-   signing algorithm used to produce the signature. It will also usually have
-   a "kid" field, the ID of the key which was used to sign the token.
-
-   Here is an example JOSE Header for a JSON Web Token (formatted with
-   whitespace for readability):
-
-   ```
-   {
-       "typ": "JWT",
-       "alg": "ES256",
-       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"
-   }
-   ```
-
-   It specifies that this object is going to be a JSON Web Token signed with
-   the key with the given ID, using the Elliptic Curve (ES256) signature
-   algorithm with a SHA-256 hash.
-
-2. Claim Set
-
-   The Claim Set is a JSON struct containing these standard registered claim
-   name fields:
-
-   - `iss` (Issuer): The issuer of the token, typically the FQDN of the
-     authorization server.
-   - `sub` (Subject): The subject of the token; the ID of the client which
-     requested it.
-   - `aud` (Audience): The intended audience of the token; the ID of the
-     service which will verify the token to authorize the client/subject.
-   - `exp` (Expiration): The token should only be considered valid up to this
-     specified date and time.
-   - `nbf` (Not Before): The token should not be considered valid before this
-     specified date and time.
-   - `iat` (Issued At): The date and time at which the authorization server
-     generated this token.
-   - `jti` (JWT ID): A unique identifier for this token. Can be used by the
-     intended audience to prevent replays of the token.
-
-   The Claim Set will also contain a private claim name unique to this
-   authorization server specification:
-
-   - `access`: An array of access entry objects with the following fields:
-     - `type`: The type of resource hosted by the service.
-     - `name`: The name of the resource of the given type hosted by the
-       service.
-     - `actions`: An array of strings which give the actions authorized on
-       this resource.
-
-   Here is an example of such a JWT Claim Set (formatted with whitespace for
-   readability):
-
-   ```
-   {
-       "iss": "auth.docker.com",
-       "sub": "jlhawn",
-       "aud": "registry.docker.com",
-       "exp": 1415387315,
-       "nbf": 1415387015,
-       "iat": 1415387015,
-       "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
-       "access": [
-           {
-               "type": "repository",
-               "name": "samalba/my-app",
-               "actions": [
-                   "push"
-               ]
-           }
-       ]
-   }
-   ```
-
-3. Signature
-
-   The authorization server will produce a JOSE header and Claim Set with no
-   extraneous whitespace, i.e., the JOSE Header from above would be
-
-   ```
-   {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
-   ```
-
-   and the Claim Set from above would be
-
-   ```
-   {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push"]}]}
-   ```
-
-   The UTF-8 representations of this JOSE Header and Claim Set are then
-   URL-safe base64 encoded (with the trailing '=' padding omitted), producing:
-
-   ```
-   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
-   ```
-
-   for the JOSE Header and
-
-   ```
-   eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
-   ```
-
-   for the Claim Set. These two are concatenated using a '.' character,
-   yielding the string:
-
-   ```
-   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
-   ```
-
-   This is then used as the payload to the `ES256` signature algorithm
-   specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA)
-   draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4)
-
-   This example signature will use the following ECDSA key for the server:
-
-   ```
-   {
-       "kty": "EC",
-       "crv": "P-256",
-       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
-       "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
-       "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
-       "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
-   }
-   ```
-
-   A resulting signature of the above payload using this key is:
-
-   ```
-   QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
-   ```
-
-   Concatenating all of these together with a `.` character gives the
-   resulting JWT:
-
-   ```
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w - ``` - -This can now be placed in an HTTP response and returned to the client to use to -authenticate to the audience service: - - -``` -HTTP/1.1 200 OK -Content-Type: application/json - -{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} -``` - -## Using the signed token - -Once the client has a token, it will try the registry request again with the -token placed in the HTTP `Authorization` header like so: - -``` -Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw -``` - -This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) - -## Verifying the token - -The registry must now verify the token presented by the user by inspecting the -claim set within. The registry will: - -- Ensure that the issuer (`iss` claim) is an authority it trusts. -- Ensure that the registry identifies as the audience (`aud` claim). -- Check that the current time is between the `nbf` and `exp` claim times. -- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has - not been seen before. - - To enforce this, the registry may keep a record of `jti`s it has seen for - up to the `exp` time of the token to prevent token replays. -- Check the `access` claim value and use the identified resources and the list - of actions authorized to determine whether the token grants the required - level of access for the operation the client is attempting to perform. -- Verify that the signature of the token is valid. - -At no point in this process should the registry need to call back to -the authorization server. If anything, it would only need to update a list of -trusted public keys for verifying token signatures or use a separate API -(still to be spec'd) to add/update resource records on the authorization -server. 
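The verification checklist above can be made concrete with a small Go sketch. This is illustrative and non-normative, uses only the standard library, and covers only the issuer, audience, and time-window checks; signature verification against the trusted key set and `jti` replay tracking are elided. The `ClaimSet` and `checkClaims` names are hypothetical.

```go
package tokenverify

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"strings"
	"time"
)

// ClaimSet mirrors the registered claims and the private "access" claim
// described in the specification above.
type ClaimSet struct {
	Issuer     string `json:"iss"`
	Subject    string `json:"sub"`
	Audience   string `json:"aud"`
	Expiration int64  `json:"exp"`
	NotBefore  int64  `json:"nbf"`
	IssuedAt   int64  `json:"iat"`
	JWTID      string `json:"jti"`
	Access     []struct {
		Type    string   `json:"type"`
		Name    string   `json:"name"`
		Actions []string `json:"actions"`
	} `json:"access"`
}

// checkClaims decodes the claim set of a compact JWT and applies the
// non-cryptographic checks from the list above.
func checkClaims(token, trustedIssuer, registryID string, now time.Time) (*ClaimSet, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return nil, errors.New("token must have three dot-separated parts")
	}
	// The payload is URL-safe base64 without '=' padding, per the spec.
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return nil, err
	}
	var claims ClaimSet
	if err := json.Unmarshal(payload, &claims); err != nil {
		return nil, err
	}
	if claims.Issuer != trustedIssuer {
		return nil, errors.New("untrusted issuer")
	}
	if claims.Audience != registryID {
		return nil, errors.New("token not intended for this registry")
	}
	if now.Unix() < claims.NotBefore || now.Unix() >= claims.Expiration {
		return nil, errors.New("token outside its validity window")
	}
	return &claims, nil
}
```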
diff --git a/vendor/github.com/docker/distribution/docs/spec/implementations.md b/vendor/github.com/docker/distribution/docs/spec/implementations.md deleted file mode 100644 index 5cec148f..00000000 --- a/vendor/github.com/docker/distribution/docs/spec/implementations.md +++ /dev/null @@ -1,26 +0,0 @@ -# Distribution API Implementations - -This is a list of known implementations of the Distribution API spec. - -## [Docker Distribution Registry](https://github.com/docker/distribution) - -Docker distribution is the reference implementation of the distribution API -specification. It aims to fully implement the entire specification. - -### Releases -#### 2.0.1 (_in development_) -Implements API 2.0.1 - -_Known Issues_ - - No resumable push support - - Content ranges ignored - - Blob upload status will always return a starting range of 0 - -#### 2.0.0 -Implements API 2.0.0 - -_Known Issues_ - - No resumable push support - - No PATCH implementation for blob upload - - Content ranges ignored - diff --git a/vendor/github.com/docker/distribution/docs/spec/json.md b/vendor/github.com/docker/distribution/docs/spec/json.md deleted file mode 100644 index a7b1807f..00000000 --- a/vendor/github.com/docker/distribution/docs/spec/json.md +++ /dev/null @@ -1,88 +0,0 @@ - - - - -# Docker Distribution JSON Canonicalization - -To provide consistent content hashing of JSON objects throughout Docker -Distribution APIs, the specification defines a canonical JSON format. Adopting -such a canonicalization also aids in caching JSON responses. - -## Rules - -Compliant JSON should conform to the following rules: - -1. All generated JSON should comply with [RFC - 7159](http://www.ietf.org/rfc/rfc7159.txt). -2. Resulting "JSON text" shall always be encoded in UTF-8. -3. Unless a canonical key order is defined for a particular schema, object - keys shall always appear in lexically sorted order. -4. All whitespace between tokens should be removed. -5. No "trailing commas" are allowed in object or array definitions. - -## Examples - -The following is a simple example of a canonicalized JSON string: - -```json -{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]} -``` - -## Reference - -### Other Canonicalizations - -The OLPC project specifies [Canonical -JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in -[TUF](http://theupdateframework.com/), which may be used with other -distribution-related protocols, this alternative format has been proposed in -case the original source changes. Specifications complying with either this -specification or an alternative should explicitly call out the -canonicalization format. Except for key ordering, this specification is mostly -compatible. - -### Go - -In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library -will emit canonical JSON by default. Simply using `json.Marshal` will suffice -in most cases: - -```go -incoming := map[string]interface{}{ - "asdf": 1, - "qwer": []interface{}{}, - "zxcv": []interface{}{ - map[string]interface{}{}, - true, - int(1e9), - "tyui", - }, -} - -canonical, err := json.Marshal(incoming) -if err != nil { - // ... handle error -} -``` - -To apply canonical JSON format spacing to an existing serialized JSON buffer, one -can use -[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65) -with the following arguments: - -```go -incoming := getBytes() -var canonical bytes.Buffer -if err := json.Indent(&canonical, incoming, "", ""); err != nil { - // ... 
handle error
-}
-```
diff --git a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md
deleted file mode 100644
index 259e3cf6..00000000
--- a/vendor/github.com/docker/distribution/docs/spec/manifest-v2-1.md
+++ /dev/null
@@ -1,153 +0,0 @@
-# Image Manifest Version 2, Schema 1
-
-This document outlines the format of the V2 image manifest. The image
-manifest described herein was introduced in the Docker daemon in the [v1.3.0
-release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453).
-It is a provisional manifest, providing compatibility with the [V1 Image
-format](https://github.com/docker/docker/blob/master/image/spec/v1.md) while
-the requirements for the [V2 Schema 2
-image](https://github.com/docker/distribution/pull/62) are being defined.
-
-
-Image manifests describe the various constituents of a Docker image. Image
-manifests can be serialized to JSON format with the following media types:
-
-Manifest Type | Media Type
-------------- | -------------
-manifest | "application/vnd.docker.distribution.manifest.v1+json"
-signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws"
-
-*Note that "application/json" will also be accepted for schema 1.*
-
-References:
-
- - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015)
- - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093)
-
-## *Manifest* Field Descriptions
-
-Manifest provides the base accessible fields for working with the V2 image
-format in the registry.
-
-- **`name`** *string*
-
-   name is the name of the image's repository
-
-- **`tag`** *string*
-
-   tag is the tag of the image
-
-- **`architecture`** *string*
-
-   architecture is the host architecture on which this image is intended to
-   run. This is informational and not currently used by the engine
-
-- **`fsLayers`** *array*
-
-   fsLayers is a list of filesystem layer blob sums contained in this image.
-
-   An fsLayer is a struct consisting of the following fields:
-      - **`blobSum`** *digest.Digest*
-
-      blobSum is the digest of the referenced filesystem image layer. A
-      digest can be a tarsum or a sha256 hash.
-
-
-- **`history`** *array*
-
-   history is a list of unstructured historical data for v1 compatibility.
-
-   Each history entry is a struct consisting of the following fields:
-      - **`v1Compatibility`** string
-
-      V1Compatibility is the raw V1 compatibility information. This will
-      contain the JSON object describing the V1 form of this image.
-
-- **`schemaVersion`** *int*
-
-   SchemaVersion is the version of the image manifest schema that this image
-   follows.
-
-## Signed Manifests
-
-Signed manifests provide an envelope for a signed image manifest. A signed
-manifest consists of an image manifest along with an additional field
-containing the signature of the manifest.
-
-The Docker client can verify signed manifests and display a message to the
-user.
-
-### Signing Manifests
-
-Image manifests can be signed in two different ways: with a *libtrust* private
-key or an x509 certificate chain. When signing with an x509 certificate chain,
-the public key of the first element in the chain must be the public key
-corresponding to the signing key.
-
-### Signed Manifest Field Description
-
-Signed manifests include an image manifest and a list of signatures generated
-by *libtrust*.
A signature consists of the following fields: - - -- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* - - A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) - -- **`signature`** *string* - - A signature for the image manifest, signed by a *libtrust* private key - -- **`protected`** *string* - - The signed protected header - -## Example Manifest - -*Example showing the official 'hello-world' image manifest.* - -``` -{ - "name": "hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - ], - "schemaVersion": 1, - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", - "kty": "EC", - "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", - "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" - }, - "alg": "ES256" - }, - "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", - "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" - } - ] -} - -``` diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/azure.md b/vendor/github.com/docker/distribution/docs/storage-drivers/azure.md deleted file mode 100644 index f994f38a..00000000 --- a/vendor/github.com/docker/distribution/docs/storage-drivers/azure.md +++ /dev/null @@ -1,24 +0,0 @@ - - - -# Microsoft Azure storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage. - -## Parameters - -The following parameters must be used to authenticate and configure the storage driver (case-sensitive): - -* `accountname`: Name of the Azure Storage Account. -* `accountkey`: Primary or Secondary Key for the Storage Account. -* `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api]. -* `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. - -[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/ -[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md b/vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md deleted file mode 100644 index 2dbad8cd..00000000 --- a/vendor/github.com/docker/distribution/docs/storage-drivers/filesystem.md +++ /dev/null @@ -1,16 +0,0 @@ - - - -# Filesystem storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem. - -## Parameters - -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/var/lib/registry`. 
diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md b/vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md
deleted file mode 100644
index f43e1510..00000000
--- a/vendor/github.com/docker/distribution/docs/storage-drivers/inmemory.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-
-# In-memory storage driver
-
-An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage.
-
-**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing.
-
-## Parameters
-
-None
diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/oss.md b/vendor/github.com/docker/distribution/docs/storage-drivers/oss.md
deleted file mode 100755
index 748a31da..00000000
--- a/vendor/github.com/docker/distribution/docs/storage-drivers/oss.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-# Aliyun OSS storage driver
-
-An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage.
-
-## Parameters
-
-* `accesskeyid`: Your access key ID.
-
-* `accesskeysecret`: Your access key secret.
-
-* `region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, see the Aliyun OSS documentation.
-
-* `endpoint`: (optional) By default, the endpoint is `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can override the default endpoint by setting this value.
-
-* `internal`: (optional) Whether to use the internal endpoint instead of the public endpoint for OSS access. The default is false. For a list of internal endpoints, see the Aliyun OSS documentation.
-
-* `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization).
-
-* `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
-
-* `secure`: (optional) Whether you would like to transfer data to the bucket over SSL. Defaults to false if not specified.
-
-* `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS.
-
-* `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/rados.md b/vendor/github.com/docker/distribution/docs/storage-drivers/rados.md
deleted file mode 100644
index 4b630e19..00000000
--- a/vendor/github.com/docker/distribution/docs/storage-drivers/rados.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-
-
-# Ceph RADOS storage driver
-
-An implementation of the `storagedriver.StorageDriver` interface which uses
-[Ceph RADOS Object Storage][rados] as its storage backend.
-
-## Parameters
-
-The following parameters must be used to configure the storage driver
-(case-sensitive):
-
-* `poolname`: Name of the Ceph pool
-* `username` *optional*: The user to connect as (i.e. admin, not client.admin)
-* `chunksize` *optional*: Size of the written RADOS objects. Default value is
-4 MB (4194304).
-
-This driver loads the [Ceph client configuration][rados-config] from the
-following standard paths (the first one found is used):
-
-* `$CEPH_CONF` (environment variable)
-* `/etc/ceph/ceph.conf`
-* `~/.ceph/config`
-* `ceph.conf` (in the current working directory)
-
-## Developing
-
-To include this driver when building Docker Distribution, use the build tag
-`include_rados`. Please see the [building documentation][building] for details.
-
-[rados]: http://ceph.com/docs/master/rados/
-[rados-config]: http://ceph.com/docs/master/rados/configuration/ceph-conf/
-[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags
diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/s3.md b/vendor/github.com/docker/distribution/docs/storage-drivers/s3.md
deleted file mode 100644
index 8dc3b234..00000000
--- a/vendor/github.com/docker/distribution/docs/storage-drivers/s3.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-# S3 storage driver
-
-An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.
-
-## Parameters
-
-`accesskey`: Your AWS access key.
-
-`secretkey`: Your AWS secret key.
-
-**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an EC2 instance and will handle authentication with the instance's credentials.
-
-`region`: The name of the AWS region in which you would like to store objects (for example `us-east-1`). For a list of regions, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
-
-`bucket`: The name of your S3 bucket where you wish to store objects (needs to already be created prior to driver initialization).
-
-`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
-
-`secure`: (optional) Whether you would like to transfer data to the bucket over SSL. Defaults to true (meaning data is transferred over SSL) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
-
-`v4auth`: (optional) Whether you would like to use AWS Signature Version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with Version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false).
-
-`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to S3.
-
-`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
diff --git a/vendor/github.com/docker/distribution/docs/storage-drivers/swift.md b/vendor/github.com/docker/distribution/docs/storage-drivers/swift.md
deleted file mode 100644
index 372cb6ab..00000000
--- a/vendor/github.com/docker/distribution/docs/storage-drivers/swift.md
+++ /dev/null
@@ -1,139 +0,0 @@
-
-
-
-# OpenStack Swift storage driver
-
-An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage.
-
-## Parameters
-
-`authurl`: URL for obtaining an auth token.
-
-`username`: Your OpenStack user name.
-
-`password`: Your OpenStack password.
-
-`container`: The name of your Swift container where you wish to store objects. An additional container named `<container>_segments`, which stores the segment data, is also used. The driver creates both the named container and the segments container during its initialization.
-
-`tenant`: (optional) Your OpenStack tenant name. You can either use `tenant` or `tenantid`.
-
-`tenantid`: (optional) Your OpenStack tenant id. You can either use `tenant` or `tenantid`.
-
-`domain`: (optional) Your OpenStack domain name for the Identity v3 API. You can either use `domain` or `domainid`.
-
-`domainid`: (optional) Your OpenStack domain id for the Identity v3 API. You can either use `domain` or `domainid`.
-
-`insecureskipverify`: (optional) Set to true to skip TLS verification for your OpenStack provider. The driver uses false by default.
-
-`region`: (optional) The OpenStack region name in which you would like to store objects (for example `fr`).
-
-`chunksize`: (optional) The segment size for Dynamic Large Object uploads (performed by WriteStream) to Swift. The default is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to Swift.
-
-`prefix`: (optional) The root directory tree in which to store all registry files. Defaults to the empty string, which is the container's root.
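For illustration, here is a hedged Go sketch of supplying parameters such as these through the driver factory (described in storagedrivers.md below). The import paths assume the vendored distribution layout, and all credential values are placeholders.

```go
package main

import (
	"log"

	"github.com/docker/distribution/registry/storage/driver/factory"
	// A driver package registers itself with the factory in its init().
	_ "github.com/docker/distribution/registry/storage/driver/swift"
)

func main() {
	// Parameter keys mirror the Swift documentation above; the values here
	// are placeholders for a real deployment's credentials.
	params := map[string]interface{}{
		"authurl":   "https://auth.example.com/v2.0/",
		"username":  "registry-user",
		"password":  "secret",
		"container": "registry",
		"region":    "fr",
	}

	d, err := factory.Create("swift", params)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created storage driver:", d.Name())
}
```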
\ No newline at end of file
diff --git a/vendor/github.com/docker/distribution/docs/storagedrivers.md b/vendor/github.com/docker/distribution/docs/storagedrivers.md
deleted file mode 100644
index b014049c..00000000
--- a/vendor/github.com/docker/distribution/docs/storagedrivers.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-
-
-# Docker Registry Storage Driver
-
-This document describes the registry storage driver model and implementation, and explains how to contribute new storage drivers.
-
-## Provided Drivers
-
-This storage driver package comes bundled with several drivers:
-
-- [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
-- [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem.
-- [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket.
-- [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/).
-- [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool.
-- [swift](storage-drivers/swift.md): A driver storing objects in [OpenStack Swift](http://docs.openstack.org/developer/swift/).
-- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss).
-
-## Storage Driver API
-
-The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
-
-Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
-
-Storage drivers are intended to be written in Go, providing compile-time
-validation of the `storagedriver.StorageDriver` interface.
-
-## Driver Selection and Configuration
-
-The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) functions in the built-in [database/sql](http://golang.org/pkg/database/sql) package.
-
-Storage driver factories may be registered by name using the
-`factory.Register` function, and then later invoked by calling `factory.Create`
-with a driver name and parameters map. If no such storage driver can be found,
-`factory.Create` will return an `InvalidStorageDriverError`.
-
-## Driver Contribution
-
-### Writing new storage drivers
-To create a valid storage driver, one must implement the
-`storagedriver.StorageDriver` interface and make sure to expose this driver
-via the factory system.
-
-#### Registering
-Storage drivers should call `factory.Register` with their driver name in an `init` function, allowing callers of `factory.Create` to construct instances of this driver without requiring modification of imports throughout the codebase. A sketch of this cycle follows.
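Below is a minimal, hypothetical sketch of that register/create cycle, assuming the vendored package layout. The `examplefs` name is illustrative, and the factory simply delegates to the bundled in-memory driver rather than implementing a real backend.

```go
package exampledriver

import (
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

// exampleFactory implements factory.StorageDriverFactory for a hypothetical
// "examplefs" driver. A real driver would construct itself from the
// parameters map instead of delegating to the in-memory driver.
type exampleFactory struct{}

func (f *exampleFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return inmemory.New(), nil
}

func init() {
	// After this, factory.Create("examplefs", params) returns our driver
	// without callers importing this package directly.
	factory.Register("examplefs", &exampleFactory{})
}
```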
-## Testing
-Storage driver test suites are provided in
-`storagedriver/testsuites/testsuites.go` and may be used for any storage
-driver written in Go. Tests can be registered using the `RegisterSuite`
-function, which runs the same set of tests for any registered drivers.
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
index 53def4b8..eb332d1b 100644
--- a/vendor/github.com/docker/distribution/errors.go
+++ b/vendor/github.com/docker/distribution/errors.go
@@ -1,12 +1,21 @@
 package distribution
 
 import (
+	"errors"
 	"fmt"
 	"strings"
 
 	"github.com/docker/distribution/digest"
 )
 
+// ErrManifestNotModified is returned when a conditional manifest GetByTag
+// returns nil due to the client indicating it has the latest version
+var ErrManifestNotModified = errors.New("manifest not modified")
+
+// ErrUnsupported is returned when an unimplemented or unsupported action is
+// performed
+var ErrUnsupported = errors.New("operation unsupported")
+
 // ErrRepositoryUnknown is returned if the named repository is not known by
 // the registry.
 type ErrRepositoryUnknown struct {
diff --git a/vendor/github.com/docker/distribution/health/api/api.go b/vendor/github.com/docker/distribution/health/api/api.go
deleted file mode 100644
index 73fcc453..00000000
--- a/vendor/github.com/docker/distribution/health/api/api.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package api
-
-import (
-	"errors"
-	"net/http"
-
-	"github.com/docker/distribution/health"
-)
-
-var (
-	updater = health.NewStatusUpdater()
-)
-
-// DownHandler registers a manual_http_status check that always returns an error
-func DownHandler(w http.ResponseWriter, r *http.Request) {
-	if r.Method == "POST" {
-		updater.Update(errors.New("Manual Check"))
-	} else {
-		w.WriteHeader(http.StatusNotFound)
-	}
-}
-
-// UpHandler registers a manual_http_status check that always returns nil
-func UpHandler(w http.ResponseWriter, r *http.Request) {
-	if r.Method == "POST" {
-		updater.Update(nil)
-	} else {
-		w.WriteHeader(http.StatusNotFound)
-	}
-}
-
-// init sets up the two endpoints to bring the service up and down
-func init() {
-	health.Register("manual_http_status", updater)
-	http.HandleFunc("/debug/health/down", DownHandler)
-	http.HandleFunc("/debug/health/up", UpHandler)
-}
diff --git a/vendor/github.com/docker/distribution/health/api/api_test.go b/vendor/github.com/docker/distribution/health/api/api_test.go
deleted file mode 100644
index ec82154f..00000000
--- a/vendor/github.com/docker/distribution/health/api/api_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package api
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	"github.com/docker/distribution/health"
-)
-
-// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint
-// /debug/health/down with method GET returns a 404
-func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) {
-	recorder := httptest.NewRecorder()
-
-	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil)
-	if err != nil {
-		t.Errorf("Failed to create request.")
-	}
-
-	DownHandler(recorder, req)
-
-	if recorder.Code != 404 {
-		t.Errorf("Did not get a 404.")
-	}
-}
-
-// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint
-// /debug/health/up with method GET returns a 404
-func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) {
-	recorder := httptest.NewRecorder()
-
-	req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil)
-	if err != nil {
-		t.Errorf("Failed to create request.")
-	}
-
-	UpHandler(recorder, req)
-
-	if recorder.Code != 404 {
-		t.Errorf("Did not get a 404.")
-	}
-}
-
-// TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down
-// changes the status code of the health endpoint response to 503.
-// This test is order dependent, and should come before
-// TestPOSTUpHandlerChangeStatus
-func TestPOSTDownHandlerChangeStatus(t *testing.T) {
-	recorder := httptest.NewRecorder()
-
-	req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil)
-	if err != nil {
-		t.Errorf("Failed to create request.")
-	}
-
-	DownHandler(recorder, req)
-
-	if recorder.Code != 200 {
-		t.Errorf("Did not get a 200.")
-	}
-
-	if len(health.CheckStatus()) != 1 {
-		t.Errorf("DownHandler didn't add an error check.")
-	}
-}
-
-// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes
-// the status code of the health endpoint response to 200
-func TestPOSTUpHandlerChangeStatus(t *testing.T) {
-	recorder := httptest.NewRecorder()
-
-	req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil)
-	if err != nil {
-		t.Errorf("Failed to create request.")
-	}
-
-	UpHandler(recorder, req)
-
-	if recorder.Code != 200 {
-		t.Errorf("Did not get a 200.")
-	}
-
-	if len(health.CheckStatus()) != 0 {
-		t.Errorf("UpHandler didn't remove the error check.")
-	}
-}
diff --git a/vendor/github.com/docker/distribution/health/checks/checks.go b/vendor/github.com/docker/distribution/health/checks/checks.go
deleted file mode 100644
index 9de14010..00000000
--- a/vendor/github.com/docker/distribution/health/checks/checks.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package checks
-
-import (
-	"errors"
-	"net/http"
-	"os"
-	"strconv"
-
-	"github.com/docker/distribution/health"
-)
-
-// FileChecker checks the existence of a file and returns an error
-// if the file exists, taking the application out of rotation
-func FileChecker(f string) health.Checker {
-	return health.CheckFunc(func() error {
-		if _, err := os.Stat(f); err == nil {
-			return errors.New("file exists")
-		}
-		return nil
-	})
-}
-
-// HTTPChecker does a HEAD request and verifies that the HTTP status
-// code returned is 200, taking the application out of rotation otherwise
-func HTTPChecker(r string) health.Checker {
-	return health.CheckFunc(func() error {
-		response, err := http.Head(r)
-		if err != nil {
-			return errors.New("error while checking: " + r)
-		}
-		if response.StatusCode != http.StatusOK {
-			return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode))
-		}
-		return nil
-	})
-}
diff --git a/vendor/github.com/docker/distribution/health/checks/checks_test.go b/vendor/github.com/docker/distribution/health/checks/checks_test.go
deleted file mode 100644
index 4e49d118..00000000
--- a/vendor/github.com/docker/distribution/health/checks/checks_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package checks
-
-import (
-	"testing"
-)
-
-func TestFileChecker(t *testing.T) {
-	if err := FileChecker("/tmp").Check(); err == nil {
-		t.Errorf("/tmp exists, so an error was expected")
-	}
-
-	if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil {
-		t.Errorf("NoSuchFileFromMoon does not exist, so no error was expected, got: %v", err)
-	}
-}
-
-func TestHTTPChecker(t *testing.T) {
-	if err := HTTPChecker("https://www.google.cybertron").Check(); err == nil {
-		t.Errorf("https://www.google.cybertron is unreachable, so an error was expected")
-	}
-
-	if err := HTTPChecker("https://www.google.pt").Check(); err != nil {
-		t.Errorf("https://www.google.pt is reachable, so no error was expected, got: %v", err)
-	}
-}
diff --git a/vendor/github.com/docker/distribution/health/doc.go b/vendor/github.com/docker/distribution/health/doc.go
deleted file mode 100644
index 8faa32f7..00000000
--- a/vendor/github.com/docker/distribution/health/doc.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Package health provides a generic health checking framework.
-// The health package works expvar style. By importing the package, the debug
-// server gets a "/debug/health" endpoint that returns the current
-// status of the application.
-// If there are no errors, "/debug/health" will return an HTTP 200 status,
-// together with an empty JSON reply "{}". If there are any checks
-// with errors, the JSON reply will include all the failed checks, and the
-// response will have an HTTP 503 status.
-//
-// A Check can be run either synchronously or asynchronously. We recommend
-// that most checks be registered as asynchronous checks, so a call to the
-// "/debug/health" endpoint always returns immediately. This pattern is
-// particularly useful for checks that verify upstream connectivity or
-// database status, since they might take a long time to return/timeout.
-//
-// Installing
-//
-// To install health, just import it in your application:
-//
-//   import "github.com/docker/distribution/health"
-//
-// You can also (optionally) import "health/api", which adds two convenience
-// endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add
-// "manual" checks that allow the service to quickly be brought in/out of
-// rotation.
-//
-//   import _ "github.com/docker/distribution/health/api"
-//
-//   # curl localhost:5001/debug/health
-//   {}
-//   # curl -X POST localhost:5001/debug/health/down
-//   # curl localhost:5001/debug/health
-//   {"manual_http_status":"Manual Check"}
-//
-// After importing these packages to your main application, you can start
-// registering checks.
-//
-// Registering Checks
-//
-// The recommended way of registering checks is using a periodic Check.
-// PeriodicChecks run on a certain schedule and asynchronously update the
-// status of the check. This allows "CheckStatus()" to return without blocking
-// on an expensive check.
-//
-// A trivial example of a check that runs every 5 seconds and takes our server
-// out of rotation if the current minute is even could be added as follows:
-//
-//   func currentMinuteEvenCheck() error {
-//     m := time.Now().Minute()
-//     if m%2 == 0 {
-//       return errors.New("Current minute is even!")
-//     }
-//     return nil
-//   }
-//
-//   health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
-//
-// Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to
-// implement the exact same check, but add a threshold of failures after which
-// the check will be unhealthy. This is particularly useful for flaky checks,
-// ensuring some stability of the service when handling them.
-//
-//   health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
-//
-// The lowest-level way to interact with the health package is calling
-// "Register" directly. Register allows you to pass in an arbitrary string and
-// something that implements "Checker", which runs your check. If your method
-// returns nil, it is considered a healthy check; otherwise it will make the
-// health check endpoint "/debug/health" start returning a 503 and list the
-// specific check that failed.
-//
-// Assuming you wish to register a method called "currentMinuteEvenCheck()
-// error" you could do so as follows:
-//
-//   health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
-//
-// CheckFunc is a convenience type that implements Checker.
-//
-// Another way of registering a check could be by using an anonymous function
-// and the convenience method RegisterFunc. An example that makes the status
-// endpoint always return an error:
-//
-//   health.RegisterFunc("my_check", func() error {
-//     return errors.New("This is an error!")
-//   })
-//
-// Examples
-//
-// You could also use the health checker mechanism to ensure your application
-// only comes up if certain conditions are met, or to allow the developer to
-// take the service out of rotation immediately. An example that checks
-// database connectivity and immediately takes the server out of rotation on
-// err:
-//
-//   updater := health.NewStatusUpdater()
-//   health.RegisterFunc("database_check", func() error {
-//     return updater.Check()
-//   })
-//
-//   conn, err := Connect(...) // database call here
-//   if err != nil {
-//     updater.Update(errors.New("Error connecting to the database: " + err.Error()))
-//   }
-//
-// You can also use the predefined Checkers that come included with the health
-// package. First, import the checks:
-//
-//   import "github.com/docker/distribution/health/checks"
-//
-// After that you can make use of any of the provided checks. An example of
-// using a `FileChecker` to take the application out of rotation if a certain
-// file exists can be done as follows:
-//
-//   health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
-//
-// After registering the check, it is trivial to take an application out of
-// rotation from the console:
-//
-//   # curl localhost:5001/debug/health
-//   {}
-//   # touch /tmp/disable
-//   # curl localhost:5001/debug/health
-//   {"fileChecker":"file exists"}
-//
-// You could also test the connectivity to a downstream service by using a
-// "HTTPChecker", but ensure that you only mark the test unhealthy if there
-// are a minimum of two failures in a row:
-//
-//   health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
-package health
diff --git a/vendor/github.com/docker/distribution/health/health.go b/vendor/github.com/docker/distribution/health/health.go
deleted file mode 100644
index 8a4df776..00000000
--- a/vendor/github.com/docker/distribution/health/health.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package health
-
-import (
-	"encoding/json"
-	"net/http"
-	"sync"
-	"time"
-)
-
-var (
-	mutex            sync.RWMutex
-	registeredChecks = make(map[string]Checker)
-)
-
-// Checker is the interface for a Health Checker
-type Checker interface {
-	// Check returns nil if the service is okay.
-	Check() error
-}
-
-// CheckFunc is a convenience type to create functions that implement
-// the Checker interface
-type CheckFunc func() error
-
-// Check implements the Checker interface, allowing any func() error to be
-// passed as a Checker
-func (cf CheckFunc) Check() error {
-	return cf()
-}
-
-// Updater implements a health check that is explicitly set.
-type Updater interface {
-	Checker
-
-	// Update updates the current status of the health check.
-	Update(status error)
-}
-
-// updater implements Checker and Updater, providing an asynchronous Update
-// method.
-// This allows us to have a Checker that returns from the Check() call
-// immediately, not blocking on a potentially expensive check.
-type updater struct {
-	mu     sync.Mutex
-	status error
-}
-
-// Check implements the Checker interface
-func (u *updater) Check() error {
-	u.mu.Lock()
-	defer u.mu.Unlock()
-
-	return u.status
-}
-
-// Update implements the Updater interface, allowing asynchronous access to
-// the status of a Checker.
-func (u *updater) Update(status error) {
-	u.mu.Lock()
-	defer u.mu.Unlock()
-
-	u.status = status
-}
-
-// NewStatusUpdater returns a new updater
func NewStatusUpdater() Updater {
-	return &updater{}
-}
-
-// thresholdUpdater implements Checker and Updater, providing an asynchronous
-// Update method.
-// This allows us to have a Checker that returns from the Check() call
-// immediately, not blocking on a potentially expensive check.
-type thresholdUpdater struct {
-	mu        sync.Mutex
-	status    error
-	threshold int
-	count     int
-}
-
-// Check implements the Checker interface
-func (tu *thresholdUpdater) Check() error {
-	tu.mu.Lock()
-	defer tu.mu.Unlock()
-
-	if tu.count >= tu.threshold {
-		return tu.status
-	}
-
-	return nil
-}
-
-// Update implements the Updater interface, allowing asynchronous
-// access to the status of a Checker.
-func (tu *thresholdUpdater) Update(status error) {
-	tu.mu.Lock()
-	defer tu.mu.Unlock()
-
-	if status == nil {
-		tu.count = 0
-	} else if tu.count < tu.threshold {
-		tu.count++
-	}
-
-	tu.status = status
-}
-
-// NewThresholdStatusUpdater returns a new thresholdUpdater
-func NewThresholdStatusUpdater(t int) Updater {
-	return &thresholdUpdater{threshold: t}
-}
-
-// PeriodicChecker wraps an updater to provide a periodic checker
-func PeriodicChecker(check Checker, period time.Duration) Checker {
-	u := NewStatusUpdater()
-	go func() {
-		t := time.NewTicker(period)
-		for {
-			<-t.C
-			u.Update(check.Check())
-		}
-	}()
-
-	return u
-}
-
-// PeriodicThresholdChecker wraps an updater to provide a periodic checker that
-// uses a threshold before it changes status
-func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker {
-	tu := NewThresholdStatusUpdater(threshold)
-	go func() {
-		t := time.NewTicker(period)
-		for {
-			<-t.C
-			tu.Update(check.Check())
-		}
-	}()
-
-	return tu
-}
-
-// CheckStatus returns a map with all the current health check errors
-func CheckStatus() map[string]string {
-	mutex.RLock()
-	defer mutex.RUnlock()
-	statusKeys := make(map[string]string)
-	for k, v := range registeredChecks {
-		err := v.Check()
-		if err != nil {
-			statusKeys[k] = err.Error()
-		}
-	}
-
-	return statusKeys
-}
-
-// Register associates the checker with the provided name; it panics if a
-// check is already registered under that name.
-func Register(name string, check Checker) {
-	mutex.Lock()
-	defer mutex.Unlock()
-	_, ok := registeredChecks[name]
-	if ok {
-		panic("Check already exists: " + name)
-	}
-	registeredChecks[name] = check
-}
-
-// RegisterFunc allows the convenience of registering a checker directly
-// from an arbitrary func() error
-func RegisterFunc(name string, check func() error) {
-	Register(name, CheckFunc(check))
-}
-
-// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
-// from an arbitrary func() error
-func RegisterPeriodicFunc(name string, check func() error, period time.Duration) {
-	Register(name, PeriodicChecker(CheckFunc(check), period))
-}
-
-// RegisterPeriodicThresholdFunc allows the convenience of registering a
-// PeriodicChecker from an arbitrary func() error
-func RegisterPeriodicThresholdFunc(name string, check func() error, period time.Duration, threshold int) {
-	Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold))
-}
-
-// StatusHandler returns a JSON blob with all the currently registered Health Checks
-// and their corresponding status.
-// It returns 503 if any check reports an error, 200 otherwise
-func StatusHandler(w http.ResponseWriter, r *http.Request) {
-	if r.Method == "GET" {
-		w.Header().Set("Content-Type", "application/json; charset=utf-8")
-		checksStatus := CheckStatus()
-		// If there is an error, return 503
-		if len(checksStatus) != 0 {
-			w.WriteHeader(http.StatusServiceUnavailable)
-		}
-		encoder := json.NewEncoder(w)
-		err := encoder.Encode(checksStatus)
-
-		// Encoding the JSON failed; return a generic error message
-		if err != nil {
-			encoder.Encode(struct {
-				ServerError string `json:"server_error"`
-			}{
-				ServerError: "Could not parse error message",
-			})
-		}
-	} else {
-		w.WriteHeader(http.StatusNotFound)
-	}
-}
-
-// init registers the global /debug/health API endpoint
-func init() {
-	http.HandleFunc("/debug/health", StatusHandler)
-}
diff --git a/vendor/github.com/docker/distribution/health/health_test.go b/vendor/github.com/docker/distribution/health/health_test.go
deleted file mode 100644
index 7989f0b2..00000000
--- a/vendor/github.com/docker/distribution/health/health_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package health
-
-import (
-	"errors"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-)
-
-// TestReturns200IfThereAreNoChecks ensures that the result code of the health
-// endpoint is 200 if there are no currently registered checks.
-func TestReturns200IfThereAreNoChecks(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - StatusHandler(recorder, req) - - if recorder.Code != 200 { - t.Errorf("Did not get a 200.") - } -} - -// TestReturns500IfThereAreErrorChecks ensures that the result code of the -// health endpoint is 500 if there are health checks with errors -func TestReturns503IfThereAreErrorChecks(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - // Create a manual error - Register("some_check", CheckFunc(func() error { - return errors.New("This Check did not succeed") - })) - - StatusHandler(recorder, req) - - if recorder.Code != 503 { - t.Errorf("Did not get a 503.") - } -} diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 00000000..88367b0a --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go similarity index 89% rename from vendor/github.com/docker/distribution/manifest/manifest.go rename to vendor/github.com/docker/distribution/manifest/schema1/manifest.go index 48467d48..e7cbf958 100644 --- a/vendor/github.com/docker/distribution/manifest/manifest.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -1,9 +1,10 @@ -package manifest +package schema1 import ( "encoding/json" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) @@ -17,18 +18,18 @@ const ( ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json" ) -// Versioned provides a struct with just the manifest schemaVersion. Incoming -// content with unknown schema version can be decoded against this struct to -// check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` -} +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 1, + } +) // Manifest provides the base accessible fields for working with V2 image // format in the registry. type Manifest struct { - Versioned + manifest.Versioned // Name is the name of the image's repository Name string `json:"name"` @@ -61,15 +62,20 @@ type SignedManifest struct { // UnmarshalJSON populates a new ImageManifest struct from JSON data. 
func (sm *SignedManifest) UnmarshalJSON(b []byte) error { + sm.Raw = make([]byte, len(b), len(b)) + copy(sm.Raw, b) + + p, err := sm.Payload() + if err != nil { + return err + } + var manifest Manifest - if err := json.Unmarshal(b, &manifest); err != nil { + if err := json.Unmarshal(p, &manifest); err != nil { return err } sm.Manifest = manifest - sm.Raw = make([]byte, len(b), len(b)) - copy(sm.Raw, b) - return nil } diff --git a/vendor/github.com/docker/distribution/manifest/manifest_test.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go similarity index 95% rename from vendor/github.com/docker/distribution/manifest/manifest_test.go rename to vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go index 941bfde9..16cedae3 100644 --- a/vendor/github.com/docker/distribution/manifest/manifest_test.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go @@ -1,4 +1,4 @@ -package manifest +package schema1 import ( "bytes" @@ -80,11 +80,9 @@ func genEnv(t *testing.T) *testEnv { name, tag := "foo/bar", "test" m := Manifest{ - Versioned: Versioned{ - SchemaVersion: 1, - }, - Name: name, - Tag: tag, + Versioned: SchemaVersion, + Name: name, + Tag: tag, FSLayers: []FSLayer{ { BlobSum: "asdf", diff --git a/vendor/github.com/docker/distribution/manifest/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go similarity index 98% rename from vendor/github.com/docker/distribution/manifest/sign.go rename to vendor/github.com/docker/distribution/manifest/schema1/sign.go index a4c37652..1b7b674a 100644 --- a/vendor/github.com/docker/distribution/manifest/sign.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/sign.go @@ -1,4 +1,4 @@ -package manifest +package schema1 import ( "crypto/x509" diff --git a/vendor/github.com/docker/distribution/manifest/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go similarity index 98% rename from vendor/github.com/docker/distribution/manifest/verify.go rename to vendor/github.com/docker/distribution/manifest/schema1/verify.go index 3e051b26..60f8cda0 100644 --- a/vendor/github.com/docker/distribution/manifest/verify.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/verify.go @@ -1,4 +1,4 @@ -package manifest +package schema1 import ( "crypto/x509" diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go new file mode 100644 index 00000000..bef38292 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/versioned.go @@ -0,0 +1,9 @@ +package manifest + +// Versioned provides a struct with just the manifest schemaVersion. Incoming +// content with unknown schema version can be decoded against this struct to +// check the version. 
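For illustration, the decode-then-dispatch pattern that comment describes might look like the following sketch (hypothetical caller code; raw is assumed to hold an incoming manifest body, and the helper name is invented):

import (
    "encoding/json"
    "fmt"

    "github.com/docker/distribution/manifest"
    "github.com/docker/distribution/manifest/schema1"
)

// decodeManifest sniffs the schema version before choosing a concrete decoder.
func decodeManifest(raw []byte) error {
    var v manifest.Versioned
    if err := json.Unmarshal(raw, &v); err != nil {
        return err
    }
    switch v.SchemaVersion {
    case 1:
        var sm schema1.SignedManifest
        if err := json.Unmarshal(raw, &sm); err != nil {
            return err
        }
        // proceed with the schema1 signed manifest ...
        return nil
    default:
        return fmt.Errorf("unsupported manifest schema version %d", v.SchemaVersion)
    }
}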
+type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/docker/distribution/notifications/bridge.go b/vendor/github.com/docker/distribution/notifications/bridge.go deleted file mode 100644 index b97925a5..00000000 --- a/vendor/github.com/docker/distribution/notifications/bridge.go +++ /dev/null @@ -1,155 +0,0 @@ -package notifications - -import ( - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/uuid" -) - -type bridge struct { - ub URLBuilder - actor ActorRecord - source SourceRecord - request RequestRecord - sink Sink -} - -var _ Listener = &bridge{} - -// URLBuilder defines a subset of url builder to be used by the event listener. -type URLBuilder interface { - BuildManifestURL(name, tag string) (string, error) - BuildBlobURL(name string, dgst digest.Digest) (string, error) -} - -// NewBridge returns a notification listener that writes records to sink, -// using the actor and source. Any urls populated in the events created by -// this bridge will be created using the URLBuilder. -// TODO(stevvooe): Update this to simply take a context.Context object. -func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { - return &bridge{ - ub: ub, - actor: actor, - source: source, - request: request, - sink: sink, - } -} - -// NewRequestRecord builds a RequestRecord for use in NewBridge from an -// http.Request, associating it with a request id. -func NewRequestRecord(id string, r *http.Request) RequestRecord { - return RequestRecord{ - ID: id, - Addr: context.RemoteAddr(r), - Host: r.Host, - Method: r.Method, - UserAgent: r.UserAgent(), - } -} - -func (b *bridge) ManifestPushed(repo string, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPush, repo, sm) -} - -func (b *bridge) ManifestPulled(repo string, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPull, repo, sm) -} - -func (b *bridge) ManifestDeleted(repo string, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionDelete, repo, sm) -} - -func (b *bridge) BlobPushed(repo string, desc distribution.Descriptor) error { - return b.createBlobEventAndWrite(EventActionPush, repo, desc) -} - -func (b *bridge) BlobPulled(repo string, desc distribution.Descriptor) error { - return b.createBlobEventAndWrite(EventActionPull, repo, desc) -} - -func (b *bridge) BlobDeleted(repo string, desc distribution.Descriptor) error { - return b.createBlobEventAndWrite(EventActionDelete, repo, desc) -} - -func (b *bridge) createManifestEventAndWrite(action string, repo string, sm *manifest.SignedManifest) error { - manifestEvent, err := b.createManifestEvent(action, repo, sm) - if err != nil { - return err - } - - return b.sink.Write(*manifestEvent) -} - -func (b *bridge) createManifestEvent(action string, repo string, sm *manifest.SignedManifest) (*Event, error) { - event := b.createEvent(action) - event.Target.MediaType = manifest.ManifestMediaType - event.Target.Repository = repo - - p, err := sm.Payload() - if err != nil { - return nil, err - } - - event.Target.Length = int64(len(p)) - event.Target.Size = int64(len(p)) - event.Target.Digest, err = digest.FromBytes(p) - if err != nil { - return nil, err - } - - 
event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, event.Target.Digest.String())
-	if err != nil {
-		return nil, err
-	}
-
-	return event, nil
-}
-
-func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distribution.Descriptor) error {
-	event, err := b.createBlobEvent(action, repo, desc)
-	if err != nil {
-		return err
-	}
-
-	return b.sink.Write(*event)
-}
-
-func (b *bridge) createBlobEvent(action string, repo string, desc distribution.Descriptor) (*Event, error) {
-	event := b.createEvent(action)
-	event.Target.Descriptor = desc
-	event.Target.Length = desc.Size
-	event.Target.Repository = repo
-
-	var err error
-	event.Target.URL, err = b.ub.BuildBlobURL(repo, desc.Digest)
-	if err != nil {
-		return nil, err
-	}
-
-	return event, nil
-}
-
-// createEvent creates an event with actor and source populated.
-func (b *bridge) createEvent(action string) *Event {
-	event := createEvent(action)
-	event.Source = b.source
-	event.Actor = b.actor
-	event.Request = b.request
-
-	return event
-}
-
-// createEvent returns a new event, timestamped, with the specified action.
-func createEvent(action string) *Event {
-	return &Event{
-		ID:        uuid.Generate().String(),
-		Timestamp: time.Now(),
-		Action:    action,
-	}
-}
diff --git a/vendor/github.com/docker/distribution/notifications/bridge_test.go b/vendor/github.com/docker/distribution/notifications/bridge_test.go
deleted file mode 100644
index fbf557d8..00000000
--- a/vendor/github.com/docker/distribution/notifications/bridge_test.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package notifications
-
-import (
-	"testing"
-
-	"github.com/docker/distribution/digest"
-
-	"github.com/docker/libtrust"
-
-	"github.com/docker/distribution/manifest"
-
-	"github.com/docker/distribution/registry/api/v2"
-	"github.com/docker/distribution/uuid"
-)
-
-var (
-	// common environment for expected manifest events.
-
-	repo   = "test/repo"
-	source = SourceRecord{
-		Addr:       "remote.test",
-		InstanceID: uuid.Generate().String(),
-	}
-	ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/"))
-
-	actor = ActorRecord{
-		Name: "test",
-	}
-	request = RequestRecord{}
-	m       = manifest.Manifest{
-		Name: repo,
-		Tag:  "latest",
-	}
-
-	sm      *manifest.SignedManifest
-	payload []byte
-	dgst    digest.Digest
-)
-
-func TestEventBridgeManifestPulled(t *testing.T) {
-
-	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
-		checkCommonManifest(t, EventActionPull, events...)
-
-		return nil
-	}))
-
-	if err := l.ManifestPulled(repo, sm); err != nil {
-		t.Fatalf("unexpected error notifying manifest pull: %v", err)
-	}
-}
-
-func TestEventBridgeManifestPushed(t *testing.T) {
-	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
-		checkCommonManifest(t, EventActionPush, events...)
-
-		return nil
-	}))
-
-	if err := l.ManifestPushed(repo, sm); err != nil {
-		t.Fatalf("unexpected error notifying manifest push: %v", err)
-	}
-}
-
-func TestEventBridgeManifestDeleted(t *testing.T) {
-	l := createTestEnv(t, testSinkFn(func(events ...Event) error {
-		checkCommonManifest(t, EventActionDelete, events...)
-
-		return nil
-	}))
-
-	if err := l.ManifestDeleted(repo, sm); err != nil {
-		t.Fatalf("unexpected error notifying manifest delete: %v", err)
-	}
-}
-
-func createTestEnv(t *testing.T, fn testSinkFn) Listener {
-	pk, err := libtrust.GenerateECP256PrivateKey()
-	if err != nil {
-		t.Fatalf("error generating private key: %v", err)
-	}
-
-	sm, err = manifest.Sign(&m, pk)
-	if err != nil {
-		t.Fatalf("error signing manifest: %v", err)
-	}
-
-	payload, err = sm.Payload()
-	if err != nil {
-		t.Fatalf("error getting manifest payload: %v", err)
-	}
-
-	dgst, err = digest.FromBytes(payload)
-	if err != nil {
-		t.Fatalf("error digesting manifest payload: %v", err)
-	}
-
-	return NewBridge(ub, source, actor, request, fn)
-}
-
-func checkCommonManifest(t *testing.T, action string, events ...Event) {
-	checkCommon(t, events...)
-
-	event := events[0]
-	if event.Action != action {
-		t.Fatalf("unexpected event action: %q != %q", event.Action, action)
-	}
-
-	u, err := ub.BuildManifestURL(repo, dgst.String())
-	if err != nil {
-		t.Fatalf("error building expected url: %v", err)
-	}
-
-	if event.Target.URL != u {
-		t.Fatalf("incorrect url passed: %q != %q", event.Target.URL, u)
-	}
-}
-
-func checkCommon(t *testing.T, events ...Event) {
-	if len(events) != 1 {
-		t.Fatalf("unexpected number of events: %v != 1", len(events))
-	}
-
-	event := events[0]
-
-	if event.Source != source {
-		t.Fatalf("source not equal: %#v != %#v", event.Source, source)
-	}
-
-	if event.Request != request {
-		t.Fatalf("request not equal: %#v != %#v", event.Request, request)
-	}
-
-	if event.Actor != actor {
-		t.Fatalf("actor not equal: %#v != %#v", event.Actor, actor)
-	}
-
-	if event.Target.Digest != dgst {
-		t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst)
-	}
-
-	if event.Target.Length != int64(len(payload)) {
-		t.Fatalf("unexpected target length: %v != %v", event.Target.Length, len(payload))
-	}
-
-	if event.Target.Repository != repo {
-		t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo)
-	}
-
-}
-
-type testSinkFn func(events ...Event) error
-
-func (tsf testSinkFn) Write(events ...Event) error {
-	return tsf(events...)
-}
-
-func (tsf testSinkFn) Close() error { return nil }
-
-func mustUB(ub *v2.URLBuilder, err error) *v2.URLBuilder {
-	if err != nil {
-		panic(err)
-	}
-
-	return ub
-}
diff --git a/vendor/github.com/docker/distribution/notifications/endpoint.go b/vendor/github.com/docker/distribution/notifications/endpoint.go
deleted file mode 100644
index dfdb111c..00000000
--- a/vendor/github.com/docker/distribution/notifications/endpoint.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package notifications
-
-import (
-	"net/http"
-	"time"
-)
-
-// EndpointConfig covers the optional configuration parameters for an active
-// endpoint.
-type EndpointConfig struct {
-	Headers   http.Header
-	Timeout   time.Duration
-	Threshold int
-	Backoff   time.Duration
-}
-
-// defaults set any zero-valued fields to a reasonable default.
-func (ec *EndpointConfig) defaults() {
-	if ec.Timeout <= 0 {
-		ec.Timeout = time.Second
-	}
-
-	if ec.Threshold <= 0 {
-		ec.Threshold = 10
-	}
-
-	if ec.Backoff <= 0 {
-		ec.Backoff = time.Second
-	}
-}
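As a rough usage sketch (the endpoint name, URL, and header value here are invented), a caller could rely on defaults() to fill anything left zero-valued:

func exampleEndpoint() *Endpoint {
    return NewEndpoint("webhook", "http://listener.local/events", EndpointConfig{
        Timeout:   2 * time.Second, // zero-valued fields fall back via defaults()
        Threshold: 5,
        Backoff:   time.Second,
        Headers:   http.Header{"Authorization": {"Bearer <token>"}},
    })
}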
-// Endpoint is a reliable, queued, thread-safe sink that notifies external http
-// services when events are written. Writes are non-blocking and always
-// succeed for callers but events may be queued internally.
-type Endpoint struct {
-	Sink
-	url  string
-	name string
-
-	EndpointConfig
-
-	metrics *safeMetrics
-}
-
-// NewEndpoint returns a running endpoint, ready to receive events.
-func NewEndpoint(name, url string, config EndpointConfig) *Endpoint {
-	var endpoint Endpoint
-	endpoint.name = name
-	endpoint.url = url
-	endpoint.EndpointConfig = config
-	endpoint.defaults()
-	endpoint.metrics = newSafeMetrics()
-
-	// Configures the in-memory queue, retry, http pipeline.
-	endpoint.Sink = newHTTPSink(
-		endpoint.url, endpoint.Timeout, endpoint.Headers,
-		endpoint.metrics.httpStatusListener())
-	endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff)
-	endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener())
-
-	register(&endpoint)
-	return &endpoint
-}
-
-// Name returns the name of the endpoint, generally used for debugging.
-func (e *Endpoint) Name() string {
-	return e.name
-}
-
-// URL returns the url of the endpoint.
-func (e *Endpoint) URL() string {
-	return e.url
-}
-
-// ReadMetrics populates em with metrics from the endpoint.
-func (e *Endpoint) ReadMetrics(em *EndpointMetrics) {
-	e.metrics.Lock()
-	defer e.metrics.Unlock()
-
-	*em = e.metrics.EndpointMetrics
-	// Maps still need to be copied in a threadsafe manner.
-	em.Statuses = make(map[string]int)
-	for k, v := range e.metrics.Statuses {
-		em.Statuses[k] = v
-	}
-}
diff --git a/vendor/github.com/docker/distribution/notifications/event.go b/vendor/github.com/docker/distribution/notifications/event.go
deleted file mode 100644
index 97030026..00000000
--- a/vendor/github.com/docker/distribution/notifications/event.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package notifications
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/docker/distribution"
-)
-
-// EventAction constants used in action field of Event.
-const (
-	EventActionPull   = "pull"
-	EventActionPush   = "push"
-	EventActionDelete = "delete"
-)
-
-const (
-	// EventsMediaType is the mediatype for the json event envelope. If the
-	// Event, ActorRecord, SourceRecord or Envelope structs change, the version
-	// number should be incremented.
-	EventsMediaType = "application/vnd.docker.distribution.events.v1+json"
-	// LayerMediaType is the media type for image rootfs diffs (aka "layers")
-	// used by Docker. We don't expect this to change for quite a while.
-	layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
-)
-
-// Envelope defines the fields of a json event envelope message that can hold
-// one or more events.
-type Envelope struct {
-	// Events make up the contents of the envelope. Events present in a single
-	// envelope are not necessarily related.
-	Events []Event `json:"events,omitempty"`
-}
-
-// TODO(stevvooe): The event type should be separate from the json format. It
-// should be defined as an interface. Leaving as is for now since we don't
-// need that at this time. If we make this change, the struct below would be
-// called "EventRecord".
-
-// Event provides the fields required to describe a registry event.
-type Event struct {
-	// ID provides a unique identifier for the event.
-	ID string `json:"id,omitempty"`
-
-	// Timestamp is the time at which the event occurred.
-	Timestamp time.Time `json:"timestamp,omitempty"`
-
-	// Action indicates what action encompasses the provided event.
-	Action string `json:"action,omitempty"`
-
-	// Target uniquely describes the target of the event.
-	Target struct {
-		// TODO(stevvooe): Use http.DetectContentType for layers, maybe.
-
-		distribution.Descriptor
-
-		// Length in bytes of content. Same as Size field in Descriptor.
-		// Provided for backwards compatibility.
-		Length int64 `json:"length,omitempty"`
-
-		// Repository identifies the named repository.
-		Repository string `json:"repository,omitempty"`
-
-		// URL provides a direct link to the content.
-		URL string `json:"url,omitempty"`
-	} `json:"target,omitempty"`
-
-	// Request covers the request that generated the event.
-	Request RequestRecord `json:"request,omitempty"`
-
-	// Actor specifies the agent that initiated the event. For most
-	// situations, this could be from the authorization context of the request.
-	Actor ActorRecord `json:"actor,omitempty"`
-
-	// Source identifies the registry node that generated the event. Put
-	// differently, while the actor "initiates" the event, the source
-	// "generates" it.
-	Source SourceRecord `json:"source,omitempty"`
-}
-
-// ActorRecord specifies the agent that initiated the event. For most
-// situations, this could be from the authorization context of the request.
-// Data in this record can refer to both the initiating client and the
-// generating request.
-type ActorRecord struct {
-	// Name corresponds to the subject or username associated with the
-	// request context that generated the event.
-	Name string `json:"name,omitempty"`
-
-	// TODO(stevvooe): Look into setting a session cookie to get this
-	// without docker daemon.
-	// SessionID
-
-	// TODO(stevvooe): Push the "Docker-Command" header to replace cookie and
-	// get the actual command.
-	// Command
-}
-
-// RequestRecord covers the request that generated the event.
-type RequestRecord struct {
-	// ID uniquely identifies the request that initiated the event.
-	ID string `json:"id"`
-
-	// Addr contains the ip or hostname and possibly port of the client
-	// connection that initiated the event. This is the RemoteAddr from
-	// the standard http request.
-	Addr string `json:"addr,omitempty"`
-
-	// Host is the externally accessible host name of the registry instance,
-	// as specified by the http host header on incoming requests.
-	Host string `json:"host,omitempty"`
-
-	// Method has the request method that generated the event.
-	Method string `json:"method"`
-
-	// UserAgent contains the user agent header of the request.
-	UserAgent string `json:"useragent"`
-}
-
-// SourceRecord identifies the registry node that generated the event. Put
-// differently, while the actor "initiates" the event, the source "generates"
-// it.
-type SourceRecord struct {
-	// Addr contains the ip or hostname and the port of the registry node
-	// that generated the event. Generally, this will be resolved by
-	// os.Hostname() along with the running port.
-	Addr string `json:"addr,omitempty"`
-
-	// InstanceID identifies a running instance of an application. Changes
-	// after each restart.
-	InstanceID string `json:"instanceID,omitempty"`
-}
-
-var (
-	// ErrSinkClosed is returned if a write is issued to a sink that has been
-	// closed. If encountered, the error should be considered terminal and
-	// retries will not be successful.
-	ErrSinkClosed = fmt.Errorf("sink: closed")
-)
-
-// Sink accepts and sends events.
-type Sink interface {
-	// Write writes one or more events to the sink. If no error is returned,
-	// the caller will assume that all events have been committed and will not
-	// try to send them again. If an error is received, the caller may retry
-	// sending the event.
The caller should cede the slice of memory to the - // sink and not modify it after calling this method. - Write(events ...Event) error - - // Close the sink, possibly waiting for pending events to flush. - Close() error -} diff --git a/vendor/github.com/docker/distribution/notifications/event_test.go b/vendor/github.com/docker/distribution/notifications/event_test.go deleted file mode 100644 index ac4dfd93..00000000 --- a/vendor/github.com/docker/distribution/notifications/event_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package notifications - -import ( - "encoding/json" - "strings" - "testing" - "time" - - "github.com/docker/distribution/manifest" -) - -// TestEventJSONFormat provides silly test to detect if the event format or -// envelope has changed. If this code fails, the revision of the protocol may -// need to be incremented. -func TestEventEnvelopeJSONFormat(t *testing.T) { - var expected = strings.TrimSpace(` -{ - "events": [ - { - "id": "asdf-asdf-asdf-asdf-0", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 1, - "digest": "sha256:0123456789abcdef0", - "length": 1, - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-1", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "size": 2, - "digest": "tarsum.v2+sha256:0123456789abcdef1", - "length": 2, - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-2", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "size": 3, - "digest": "tarsum.v2+sha256:0123456789abcdef2", - "length": 3, - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - } - ] -} - `) - - tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - t.Fatalf("error creating time: %v", err) - } - - var prototype Event - prototype.Action = EventActionPush - prototype.Timestamp = tm - prototype.Actor.Name = "test-actor" - prototype.Request.ID = "asdfasdf" - prototype.Request.Addr = "client.local" - prototype.Request.Host = "registrycluster.local" - prototype.Request.Method = "PUT" - prototype.Request.UserAgent = "test/0.1" - prototype.Source.Addr = "hostname.local:port" - - var manifestPush Event - manifestPush = prototype - manifestPush.ID = "asdf-asdf-asdf-asdf-0" - manifestPush.Target.Digest = "sha256:0123456789abcdef0" - manifestPush.Target.Length = 1 - manifestPush.Target.Size = 1 - manifestPush.Target.MediaType = 
manifest.ManifestMediaType
-	manifestPush.Target.Repository = "library/test"
-	manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest"
-
-	var layerPush0 Event
-	layerPush0 = prototype
-	layerPush0.ID = "asdf-asdf-asdf-asdf-1"
-	layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1"
-	layerPush0.Target.Length = 2
-	layerPush0.Target.Size = 2
-	layerPush0.Target.MediaType = layerMediaType
-	layerPush0.Target.Repository = "library/test"
-	layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest"
-
-	var layerPush1 Event
-	layerPush1 = prototype
-	layerPush1.ID = "asdf-asdf-asdf-asdf-2"
-	layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2"
-	layerPush1.Target.Length = 3
-	layerPush1.Target.Size = 3
-	layerPush1.Target.MediaType = layerMediaType
-	layerPush1.Target.Repository = "library/test"
-	layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest"
-
-	var envelope Envelope
-	envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1)
-
-	p, err := json.MarshalIndent(envelope, "", "   ")
-	if err != nil {
-		t.Fatalf("unexpected error marshaling envelope: %v", err)
-	}
-	if string(p) != expected {
-		t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected)
-	}
-}
diff --git a/vendor/github.com/docker/distribution/notifications/http.go b/vendor/github.com/docker/distribution/notifications/http.go
deleted file mode 100644
index 465434f1..00000000
--- a/vendor/github.com/docker/distribution/notifications/http.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package notifications
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"sync"
-	"time"
-)
-
-// httpSink implements a single-flight, http notification endpoint. This is
-// very lightweight in that it only makes an attempt at an http request.
-// Reliability should be provided by the caller.
-type httpSink struct {
-	url string
-
-	mu        sync.Mutex
-	closed    bool
-	client    *http.Client
-	listeners []httpStatusListener
-
-	// TODO(stevvooe): Allow one to configure the media type accepted by this
-	// sink and choose the serialization based on that.
-}
-
-// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other
-// sinks for increased reliability.
-func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink {
-	return &httpSink{
-		url:       u,
-		listeners: listeners,
-		client: &http.Client{
-			Transport: &headerRoundTripper{
-				Transport: http.DefaultTransport.(*http.Transport),
-				headers:   headers,
-			},
-			Timeout: timeout,
-		},
-	}
-}
-
-// httpStatusListener is called on various outcomes of sending notifications.
-type httpStatusListener interface {
-	success(status int, events ...Event)
-	failure(status int, events ...Event)
-	err(err error, events ...Event)
-}
-
-// Write makes an attempt to notify the endpoint, returning an error if it
-// fails. It is the caller's responsibility to retry on error. The events are
-// accepted or rejected as a group.
-func (hs *httpSink) Write(events ...Event) error {
-	hs.mu.Lock()
-	defer hs.mu.Unlock()
-	defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections()
-
-	if hs.closed {
-		return ErrSinkClosed
-	}
-
-	envelope := Envelope{
-		Events: events,
-	}
-
-	// TODO(stevvooe): It is not ideal to keep re-encoding the request body on
-	// retry but we are going to do it to keep the code simple. It is likely
-	// we could change the event struct to manage its own buffer.
- - p, err := json.MarshalIndent(envelope, "", " ") - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) - } - - body := bytes.NewReader(p) - resp, err := hs.client.Post(hs.url, EventsMediaType, body) - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - - return fmt.Errorf("%v: error posting: %v", hs, err) - } - defer resp.Body.Close() - - // The notifier will treat any 2xx or 3xx response as accepted by the - // endpoint. - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - for _, listener := range hs.listeners { - listener.success(resp.StatusCode, events...) - } - - // TODO(stevvooe): This is a little accepting: we may want to support - // unsupported media type responses with retries using the correct - // media type. There may also be cases that will never work. - - return nil - default: - for _, listener := range hs.listeners { - listener.failure(resp.StatusCode, events...) - } - return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) - } -} - -// Close the endpoint -func (hs *httpSink) Close() error { - hs.mu.Lock() - defer hs.mu.Unlock() - - if hs.closed { - return fmt.Errorf("httpsink: already closed") - } - - hs.closed = true - return nil -} - -func (hs *httpSink) String() string { - return fmt.Sprintf("httpSink{%s}", hs.url) -} - -type headerRoundTripper struct { - *http.Transport // must be transport to support CancelRequest - headers http.Header -} - -func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var nreq http.Request - nreq = *req - nreq.Header = make(http.Header) - - merge := func(headers http.Header) { - for k, v := range headers { - nreq.Header[k] = append(nreq.Header[k], v...) - } - } - - merge(req.Header) - merge(hrt.headers) - - return hrt.Transport.RoundTrip(&nreq) -} diff --git a/vendor/github.com/docker/distribution/notifications/http_test.go b/vendor/github.com/docker/distribution/notifications/http_test.go deleted file mode 100644 index e0276ccd..00000000 --- a/vendor/github.com/docker/distribution/notifications/http_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package notifications - -import ( - "encoding/json" - "fmt" - "mime" - "net/http" - "net/http/httptest" - "reflect" - "strconv" - "testing" - - "github.com/docker/distribution/manifest" -) - -// TestHTTPSink mocks out an http endpoint and notifies it under a couple of -// conditions, ensuring correct behavior. 
-func TestHTTPSink(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - if r.Method != "POST" { - w.WriteHeader(http.StatusMethodNotAllowed) - t.Fatalf("unexpected request method: %v", r.Method) - return - } - - // Extract the content type and make sure it matches - contentType := r.Header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) - return - } - - if mediaType != EventsMediaType { - w.WriteHeader(http.StatusUnsupportedMediaType) - t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) - return - } - - var envelope Envelope - dec := json.NewDecoder(r.Body) - if err := dec.Decode(&envelope); err != nil { - w.WriteHeader(http.StatusBadRequest) - t.Fatalf("error decoding request body: %v", err) - return - } - - // Let caller choose the status - status, err := strconv.Atoi(r.FormValue("status")) - if err != nil { - t.Logf("error parsing status: %v", err) - - // May just be empty, set status to 200 - status = http.StatusOK - } - - w.WriteHeader(status) - })) - - metrics := newSafeMetrics() - sink := newHTTPSink(server.URL, 0, nil, - &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) - - var expectedMetrics EndpointMetrics - expectedMetrics.Statuses = make(map[string]int) - - for _, tc := range []struct { - events []Event // events to send - url string - failure bool // true if there should be a failure. - statusCode int // if not set, no status code should be incremented. - }{ - { - statusCode: http.StatusOK, - events: []Event{ - createTestEvent("push", "library/test", manifest.ManifestMediaType)}, - }, - { - statusCode: http.StatusOK, - events: []Event{ - createTestEvent("push", "library/test", manifest.ManifestMediaType), - createTestEvent("push", "library/test", layerMediaType), - createTestEvent("push", "library/test", layerMediaType), - }, - }, - { - statusCode: http.StatusTemporaryRedirect, - }, - { - statusCode: http.StatusBadRequest, - failure: true, - }, - { - // Case where connection never goes through. - url: "http://shoudlntresolve/", - failure: true, - }, - } { - - if tc.failure { - expectedMetrics.Failures += len(tc.events) - } else { - expectedMetrics.Successes += len(tc.events) - } - - if tc.statusCode > 0 { - expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) - } - - url := tc.url - if url == "" { - url = server.URL + "/" - } - // setup endpoint to respond with expected status code. - url += fmt.Sprintf("?status=%v", tc.statusCode) - sink.url = url - - t.Logf("testcase: %v, fail=%v", url, tc.failure) - // Try a simple event emission. - err := sink.Write(tc.events...) 
- - if !tc.failure { - if err != nil { - t.Fatalf("unexpected error send event: %v", err) - } - } else { - if err == nil { - t.Fatalf("the endpoint should have rejected the request") - } - } - - if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { - t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) - } - } - - if err := sink.Close(); err != nil { - t.Fatalf("unexpected error closing http sink: %v", err) - } - - // double close returns error - if err := sink.Close(); err == nil { - t.Fatalf("second close should have returned error: %v", err) - } - -} - -func createTestEvent(action, repo, typ string) Event { - event := createEvent(action) - - event.Target.MediaType = typ - event.Target.Repository = repo - - return *event -} diff --git a/vendor/github.com/docker/distribution/notifications/listener.go b/vendor/github.com/docker/distribution/notifications/listener.go deleted file mode 100644 index b86fa8a4..00000000 --- a/vendor/github.com/docker/distribution/notifications/listener.go +++ /dev/null @@ -1,205 +0,0 @@ -package notifications - -import ( - "net/http" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -// ManifestListener describes a set of methods for listening to events related to manifests. -type ManifestListener interface { - ManifestPushed(repo string, sm *manifest.SignedManifest) error - ManifestPulled(repo string, sm *manifest.SignedManifest) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - ManifestDeleted(repo string, sm *manifest.SignedManifest) error -} - -// BlobListener describes a listener that can respond to layer related events. -type BlobListener interface { - BlobPushed(repo string, desc distribution.Descriptor) error - BlobPulled(repo string, desc distribution.Descriptor) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - BlobDeleted(repo string, desc distribution.Descriptor) error -} - -// Listener combines all repository events into a single interface. -type Listener interface { - ManifestListener - BlobListener -} - -type repositoryListener struct { - distribution.Repository - listener Listener -} - -// Listen dispatches events on the repository to the listener. -func Listen(repo distribution.Repository, listener Listener) distribution.Repository { - return &repositoryListener{ - Repository: repo, - listener: listener, - } -} - -func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - manifests, err := rl.Repository.Manifests(ctx, options...) 
- if err != nil { - return nil, err - } - return &manifestServiceListener{ - ManifestService: manifests, - parent: rl, - }, nil -} - -func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { - return &blobServiceListener{ - BlobStore: rl.Repository.Blobs(ctx), - parent: rl, - } -} - -type manifestServiceListener struct { - distribution.ManifestService - parent *repositoryListener -} - -func (msl *manifestServiceListener) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { - sm, err := msl.ManifestService.Get(dgst) - if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) - } - } - - return sm, err -} - -func (msl *manifestServiceListener) Put(sm *manifest.SignedManifest) error { - err := msl.ManifestService.Put(sm) - - if err == nil { - if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest push to listener: %v", err) - } - } - - return err -} - -func (msl *manifestServiceListener) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { - sm, err := msl.ManifestService.GetByTag(tag, options...) - if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) - } - } - - return sm, err -} - -type blobServiceListener struct { - distribution.BlobStore - parent *repositoryListener -} - -var _ distribution.BlobStore = &blobServiceListener{} - -func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - p, err := bsl.BlobStore.Get(ctx, dgst) - if err == nil { - if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) - } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) - } - } - } - - return p, err -} - -func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - rc, err := bsl.BlobStore.Open(ctx, dgst) - if err == nil { - if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) - } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) - } - } - } - - return rc, err -} - -func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) - if err == nil { - if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) - } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) - } - } - } - - return err -} - -func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - desc, err := bsl.BlobStore.Put(ctx, mediaType, p) - if err == nil { - if err := 
bsl.parent.listener.BlobPushed(bsl.parent.Repository.Name(), desc); err != nil {
-			context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err)
-		}
-	}
-
-	return desc, err
-}
-
-func (bsl *blobServiceListener) Create(ctx context.Context) (distribution.BlobWriter, error) {
-	wr, err := bsl.BlobStore.Create(ctx)
-	return bsl.decorateWriter(wr), err
-}
-
-func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
-	wr, err := bsl.BlobStore.Resume(ctx, id)
-	return bsl.decorateWriter(wr), err
-}
-
-func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter {
-	return &blobWriterListener{
-		BlobWriter: wr,
-		parent:     bsl,
-	}
-}
-
-type blobWriterListener struct {
-	distribution.BlobWriter
-	parent *blobServiceListener
-}
-
-func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
-	committed, err := bwl.BlobWriter.Commit(ctx, desc)
-	if err == nil {
-		if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Name(), committed); err != nil {
-			context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err)
-		}
-	}
-
-	return committed, err
-}
diff --git a/vendor/github.com/docker/distribution/notifications/listener_test.go b/vendor/github.com/docker/distribution/notifications/listener_test.go
deleted file mode 100644
index ccd84593..00000000
--- a/vendor/github.com/docker/distribution/notifications/listener_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package notifications
-
-import (
-	"io"
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest"
-	"github.com/docker/distribution/registry/storage"
-	"github.com/docker/distribution/registry/storage/cache/memory"
-	"github.com/docker/distribution/registry/storage/driver/inmemory"
-	"github.com/docker/distribution/testutil"
-	"github.com/docker/libtrust"
-)
-
-func TestListener(t *testing.T) {
-	ctx := context.Background()
-	registry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false)
-	tl := &testListener{
-		ops: make(map[string]int),
-	}
-
-	repository, err := registry.Repository(ctx, "foo/bar")
-	if err != nil {
-		t.Fatalf("unexpected error getting repo: %v", err)
-	}
-	repository = Listen(repository, tl)
-
-	// Now take the registry through a number of operations
-	checkExerciseRepository(t, repository)
-
-	expectedOps := map[string]int{
-		"manifest:push": 1,
-		"manifest:pull": 2,
-		// "manifest:delete": 0, // deletes not supported for now
-		"layer:push": 2,
-		"layer:pull": 2,
-		// "layer:delete": 0, // deletes not supported for now
-	}
-
-	if !reflect.DeepEqual(tl.ops, expectedOps) {
-		t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps)
-	}
-
-}
-
-type testListener struct {
-	ops map[string]int
-}
-
-func (tl *testListener) ManifestPushed(repo string, sm *manifest.SignedManifest) error {
-	tl.ops["manifest:push"]++
-
-	return nil
-}
-
-func (tl *testListener) ManifestPulled(repo string, sm *manifest.SignedManifest) error {
-	tl.ops["manifest:pull"]++
-	return nil
-}
-
-func (tl *testListener) ManifestDeleted(repo string, sm *manifest.SignedManifest) error {
-	tl.ops["manifest:delete"]++
-	return nil
-}
-
-func (tl *testListener) BlobPushed(repo string, desc distribution.Descriptor) error {
-	tl.ops["layer:push"]++
-
-	return nil
-}
-
-func (tl *testListener) BlobPulled(repo string, desc distribution.Descriptor) error {
-	tl.ops["layer:pull"]++
-	return nil
-}
-
-func (tl *testListener) BlobDeleted(repo string, desc distribution.Descriptor) error {
-	tl.ops["layer:delete"]++
-	return nil
-}
-
-// checkExerciseRepository takes the registry through all of its operations,
-// carrying out generic checks.
-func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
-	// TODO(stevvooe): This would be a nice testutil function. Basically, it
-	// takes the registry through a common set of operations. This could be
-	// used to make cross-cutting updates by changing internals that affect
-	// update counts. Basically, it would make writing tests a lot easier.
-	ctx := context.Background()
-	tag := "thetag"
-	m := manifest.Manifest{
-		Versioned: manifest.Versioned{
-			SchemaVersion: 1,
-		},
-		Name: repository.Name(),
-		Tag:  tag,
-	}
-
-	blobs := repository.Blobs(ctx)
-	for i := 0; i < 2; i++ {
-		rs, ds, err := testutil.CreateRandomTarFile()
-		if err != nil {
-			t.Fatalf("error creating test layer: %v", err)
-		}
-		dgst := digest.Digest(ds)
-
-		wr, err := blobs.Create(ctx)
-		if err != nil {
-			t.Fatalf("error creating layer upload: %v", err)
-		}
-
-		// Use the resumes, as well!
-		wr, err = blobs.Resume(ctx, wr.ID())
-		if err != nil {
-			t.Fatalf("error resuming layer upload: %v", err)
-		}
-
-		io.Copy(wr, rs)
-
-		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
-			t.Fatalf("unexpected error finishing upload: %v", err)
-		}
-
-		m.FSLayers = append(m.FSLayers, manifest.FSLayer{
-			BlobSum: dgst,
-		})
-
-		// Then fetch the blobs
-		if rc, err := blobs.Open(ctx, dgst); err != nil {
-			t.Fatalf("error fetching layer: %v", err)
-		} else {
-			defer rc.Close()
-		}
-	}
-
-	pk, err := libtrust.GenerateECP256PrivateKey()
-	if err != nil {
-		t.Fatalf("unexpected error generating key: %v", err)
-	}
-
-	sm, err := manifest.Sign(&m, pk)
-	if err != nil {
-		t.Fatalf("unexpected error signing manifest: %v", err)
-	}
-
-	manifests, err := repository.Manifests(ctx)
-	if err != nil {
-		t.Fatal(err.Error())
-	}
-
-	if err = manifests.Put(sm); err != nil {
-		t.Fatalf("unexpected error putting the manifest: %v", err)
-	}
-
-	p, err := sm.Payload()
-	if err != nil {
-		t.Fatalf("unexpected error getting manifest payload: %v", err)
-	}
-
-	dgst, err := digest.FromBytes(p)
-	if err != nil {
-		t.Fatalf("unexpected error digesting manifest payload: %v", err)
-	}
-
-	fetchedByManifest, err := manifests.Get(dgst)
-	if err != nil {
-		t.Fatalf("unexpected error fetching manifest: %v", err)
-	}
-
-	if fetchedByManifest.Tag != sm.Tag {
-		t.Fatalf("retrieved unexpected manifest: %v", err)
-	}
-
-	fetched, err := manifests.GetByTag(tag)
-	if err != nil {
-		t.Fatalf("unexpected error fetching manifest: %v", err)
-	}
-
-	if fetched.Tag != fetchedByManifest.Tag {
-		t.Fatalf("retrieved unexpected manifest: %v", err)
-	}
-}
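Before the metrics plumbing in metrics.go below, a brief sketch of how an endpoint's counters are meant to be read (ep is assumed to come from NewEndpoint earlier; the helper name and fmt-based reporting are invented for illustration):

func dumpMetrics(ep *Endpoint) {
    var em EndpointMetrics
    ep.ReadMetrics(&em) // copies counters and the status map under the metrics lock
    fmt.Printf("pending=%d successes=%d failures=%d errors=%d\n",
        em.Pending, em.Successes, em.Failures, em.Errors)
}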
diff --git a/vendor/github.com/docker/distribution/notifications/metrics.go b/vendor/github.com/docker/distribution/notifications/metrics.go
deleted file mode 100644
index 2a8ffcbd..00000000
--- a/vendor/github.com/docker/distribution/notifications/metrics.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package notifications
-
-import (
-	"expvar"
-	"fmt"
-	"net/http"
-	"sync"
-)
-
-// EndpointMetrics track various actions taken by the endpoint, typically by
-// number of events. The goal of this is to export it via expvar but we may
-// find some other future solution to be better.
-type EndpointMetrics struct {
-	Pending   int            // events pending in queue
-	Events    int            // total events incoming
-	Successes int            // total events written successfully
-	Failures  int            // total events failed
-	Errors    int            // total events errored
-	Statuses  map[string]int // status code histogram, per call event
-}
-
-// safeMetrics guards the metrics implementation with a lock and provides a
-// safe update function.
-type safeMetrics struct {
-	EndpointMetrics
-	sync.Mutex // protects statuses map
-}
-
-// newSafeMetrics returns safeMetrics with map allocated.
-func newSafeMetrics() *safeMetrics {
-	var sm safeMetrics
-	sm.Statuses = make(map[string]int)
-	return &sm
-}
-
-// httpStatusListener returns the listener for the http sink that updates the
-// relevant counters.
-func (sm *safeMetrics) httpStatusListener() httpStatusListener {
-	return &endpointMetricsHTTPStatusListener{
-		safeMetrics: sm,
-	}
-}
-
-// eventQueueListener returns a listener that maintains queue related counters.
-func (sm *safeMetrics) eventQueueListener() eventQueueListener {
-	return &endpointMetricsEventQueueListener{
-		safeMetrics: sm,
-	}
-}
-
-// endpointMetricsHTTPStatusListener increments counters related to http sinks
-// for the relevant events.
-type endpointMetricsHTTPStatusListener struct {
-	*safeMetrics
-}
-
-var _ httpStatusListener = &endpointMetricsHTTPStatusListener{}
-
-func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) {
-	emsl.safeMetrics.Lock()
-	defer emsl.safeMetrics.Unlock()
-	emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events)
-	emsl.Successes += len(events)
-}
-
-func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) {
-	emsl.safeMetrics.Lock()
-	defer emsl.safeMetrics.Unlock()
-	emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events)
-	emsl.Failures += len(events)
-}
-
-func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) {
-	emsl.safeMetrics.Lock()
-	defer emsl.safeMetrics.Unlock()
-	emsl.Errors += len(events)
-}
-
-// endpointMetricsEventQueueListener maintains the incoming events counter and
-// the queue's pending count.
-type endpointMetricsEventQueueListener struct {
-	*safeMetrics
-}
-
-func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) {
-	eqc.Lock()
-	defer eqc.Unlock()
-	eqc.Events += len(events)
-	eqc.Pending += len(events)
-}
-
-func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) {
-	eqc.Lock()
-	defer eqc.Unlock()
-	eqc.Pending -= len(events)
-}
-
-// endpoints is global registry of endpoints used to report metrics to expvar
-var endpoints struct {
-	registered []*Endpoint
-	mu         sync.Mutex
-}
-
-// register places the endpoint into expvar so that stats are tracked.
-func register(e *Endpoint) {
-	endpoints.mu.Lock()
-	defer endpoints.mu.Unlock()
-
-	endpoints.registered = append(endpoints.registered, e)
-}
-
-func init() {
-	// NOTE(stevvooe): Setup registry metrics structure to report to expvar.
-	// Ideally, we do more metrics through logging but we need some nice
-	// realtime metrics for queue state for now.
-
-	registry := expvar.Get("registry")
-
-	if registry == nil {
-		registry = expvar.NewMap("registry")
-	}
-
-	var notifications expvar.Map
-	notifications.Init()
-	notifications.Set("endpoints", expvar.Func(func() interface{} {
-		endpoints.mu.Lock()
-		defer endpoints.mu.Unlock()
-
-		var names []interface{}
-		for _, v := range endpoints.registered {
-			var epjson struct {
-				Name string `json:"name"`
-				URL  string `json:"url"`
-				EndpointConfig
-
-				Metrics EndpointMetrics
-			}
-
-			epjson.Name = v.Name()
-			epjson.URL = v.URL()
-			epjson.EndpointConfig = v.EndpointConfig
-
-			v.ReadMetrics(&epjson.Metrics)
-
-			names = append(names, epjson)
-		}
-
-		return names
-	}))
-
-	registry.(*expvar.Map).Set("notifications", &notifications)
-}
diff --git a/vendor/github.com/docker/distribution/notifications/sinks.go b/vendor/github.com/docker/distribution/notifications/sinks.go
deleted file mode 100644
index dda4a565..00000000
--- a/vendor/github.com/docker/distribution/notifications/sinks.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package notifications
-
-import (
-	"container/list"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-)
-
-// NOTE(stevvooe): This file contains definitions for several utility sinks.
-// Typically, the broadcaster is the only sink that should be required
-// externally, but others are suitable for export if the need arises. Albeit,
-// the tight integration with endpoint metrics should be removed.
-
-// Broadcaster sends events to multiple, reliable Sinks. The goal of this
-// component is to dispatch events to configured endpoints. Reliability can be
-// provided by wrapping incoming sinks.
-type Broadcaster struct {
-	sinks  []Sink
-	events chan []Event
-	closed chan chan struct{}
-}
-
-// NewBroadcaster appends one or more sinks to the list of sinks. The
-// broadcaster behavior will be affected by the properties of the sink.
-// Generally, the sink should accept all messages and deal with reliability on
-// its own. Use of EventQueue and RetryingSink is recommended here.
-func NewBroadcaster(sinks ...Sink) *Broadcaster {
-	b := Broadcaster{
-		sinks:  sinks,
-		events: make(chan []Event),
-		closed: make(chan chan struct{}),
-	}
-
-	// Start the broadcaster
-	go b.run()
-
-	return &b
-}
-
-// Write accepts a block of events to be dispatched to all sinks. This method
-// will never fail and should never block (hopefully!). The caller cedes the
-// slice memory to the broadcaster and should not modify it after calling
-// write.
-func (b *Broadcaster) Write(events ...Event) error {
-	select {
-	case b.events <- events:
-	case <-b.closed:
-		return ErrSinkClosed
-	}
-	return nil
-}
-
-// Close the broadcaster, ensuring that all messages are flushed to the
-// underlying sink before returning.
-func (b *Broadcaster) Close() error {
-	logrus.Infof("broadcaster: closing")
-	select {
-	case <-b.closed:
-		// already closed
-		return fmt.Errorf("broadcaster: already closed")
-	default:
-		// do a little chan handoff dance to synchronize closing
-		closed := make(chan struct{})
-		b.closed <- closed
-		close(b.closed)
-		<-closed
-		return nil
-	}
-}
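A minimal usage sketch, assuming sink1 and sink2 are reliable Sinks (for example, each wrapped in the retrying and queueing sinks defined later in this file; the helper name is invented):

func exampleBroadcast(sink1, sink2 Sink, events []Event) {
    b := NewBroadcaster(sink1, sink2)
    if err := b.Write(events...); err != nil {
        // ErrSinkClosed here is terminal: the broadcaster was already closed.
        logrus.Errorf("broadcast write: %v", err)
    }
    // Close flushes outstanding events to every sink before returning.
    if err := b.Close(); err != nil {
        logrus.Errorf("closing broadcaster: %v", err)
    }
}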
-// run is the main broadcast loop, started when the broadcaster is created.
-// Under normal conditions, it waits for events on the event channel. After
-// Close is called, this goroutine will exit.
-func (b *Broadcaster) run() {
-	for {
-		select {
-		case block := <-b.events:
-			for _, sink := range b.sinks {
-				if err := sink.Write(block...); err != nil {
-					logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err)
-				}
-			}
-		case closing := <-b.closed:
-
-			// close all the underlying sinks
-			for _, sink := range b.sinks {
-				if err := sink.Close(); err != nil {
-					logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err)
-				}
-			}
-			closing <- struct{}{}
-
-			logrus.Debugf("broadcaster: closed")
-			return
-		}
-	}
-}
-
-// eventQueue accepts all messages into a queue for asynchronous consumption
-// by a sink. It is unbounded and thread safe but the sink must be reliable or
-// events will be dropped.
-type eventQueue struct {
-	sink      Sink
-	events    *list.List
-	listeners []eventQueueListener
-	cond      *sync.Cond
-	mu        sync.Mutex
-	closed    bool
-}
-
-// eventQueueListener is called when various events happen on the queue.
-type eventQueueListener interface {
-	ingress(events ...Event)
-	egress(events ...Event)
-}
-
-// newEventQueue returns a queue to the provided sink. If listeners are
-// provided, they will be called to update pending metrics on ingress and
-// egress.
-func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue {
-	eq := eventQueue{
-		sink:      sink,
-		events:    list.New(),
-		listeners: listeners,
-	}
-
-	eq.cond = sync.NewCond(&eq.mu)
-	go eq.run()
-	return &eq
-}
-
-// Write accepts the events into the queue, only failing if the queue has
-// been closed.
-func (eq *eventQueue) Write(events ...Event) error {
-	eq.mu.Lock()
-	defer eq.mu.Unlock()
-
-	if eq.closed {
-		return ErrSinkClosed
-	}
-
-	for _, listener := range eq.listeners {
-		listener.ingress(events...)
-	}
-	eq.events.PushBack(events)
-	eq.cond.Signal() // signal waiters
-
-	return nil
-}
-
-// Close shuts down the event queue, flushing it.
-func (eq *eventQueue) Close() error {
-	eq.mu.Lock()
-	defer eq.mu.Unlock()
-
-	if eq.closed {
-		return fmt.Errorf("eventqueue: already closed")
-	}
-
-	// set closed flag
-	eq.closed = true
-	eq.cond.Signal() // signal flushes queue
-	eq.cond.Wait()   // wait for signal from last flush
-
-	return eq.sink.Close()
-}
-
-// run is the main goroutine to flush events to the target sink.
-func (eq *eventQueue) run() {
-	for {
-		block := eq.next()
-
-		if block == nil {
-			return // nil block means event queue is closed.
-		}
-
-		if err := eq.sink.Write(block...); err != nil {
-			logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err)
-		}
-
-		for _, listener := range eq.listeners {
-			listener.egress(block...)
-		}
-	}
-}
-
-// next encompasses the critical section of the run loop. When the queue is
-// empty, it will block on the condition. If new data arrives, it will wake
-// and return a block. When closed, a nil slice will be returned.
-func (eq *eventQueue) next() []Event {
-	eq.mu.Lock()
-	defer eq.mu.Unlock()
-
-	for eq.events.Len() < 1 {
-		if eq.closed {
-			eq.cond.Broadcast()
-			return nil
-		}
-
-		eq.cond.Wait()
-	}
-
-	front := eq.events.Front()
-	block := front.Value.([]Event)
-	eq.events.Remove(front)
-
-	return block
-}
-
-// retryingSink retries the write until success or an ErrSinkClosed is
-// returned. The underlying sink must have p > 0 of succeeding or the sink
-// will block. Internally, it uses a circuit breaker to manage retries and
-// reset. Concurrent calls to a retrying sink are serialized through the sink,
-// meaning that if one is in-flight, another will not proceed.
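To make that layering concrete, a sketch of the composition NewEndpoint performs in endpoint.go, using the unexported constructors from this file (the helper name, url, timeout, and headers are placeholders; net/http is assumed imported):

func exampleSinkPipeline(url string, timeout time.Duration, headers http.Header) Sink {
    var s Sink = newHTTPSink(url, timeout, headers) // single-flight HTTP delivery
    s = newRetryingSink(s, 10, time.Second)         // back off and retry on failure
    s = newEventQueue(s)                            // absorb writes into an unbounded queue
    // Writes now return immediately; delivery is retried until it
    // succeeds or the pipeline is closed.
    return s
}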
-type retryingSink struct { - mu sync.Mutex - sink Sink - closed bool - - // circuit breaker heuristics - failures struct { - threshold int - recent int - last time.Time - backoff time.Duration // time after which we retry after failure. - } -} - -type retryingSinkListener interface { - active(events ...Event) - retry(events ...Event) -} - -// TODO(stevvooe): We are using circuit break here, which actually doesn't -// make a whole lot of sense for this use case, since we always retry. Move -// this to use bounded exponential backoff. - -// newRetryingSink returns a sink that will retry writes to a sink, backing -// off on failure. Parameters threshold and backoff adjust the behavior of the -// circuit breaker. -func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { - rs := &retryingSink{ - sink: sink, - } - rs.failures.threshold = threshold - rs.failures.backoff = backoff - - return rs -} - -// Write attempts to flush the events to the downstream sink until it succeeds -// or the sink is closed. -func (rs *retryingSink) Write(events ...Event) error { - rs.mu.Lock() - defer rs.mu.Unlock() - -retry: - - if rs.closed { - return ErrSinkClosed - } - - if !rs.proceed() { - logrus.Warnf("%v encountered too many errors, backing off", rs.sink) - rs.wait(rs.failures.backoff) - goto retry - } - - if err := rs.write(events...); err != nil { - if err == ErrSinkClosed { - // terminal! - return err - } - - logrus.Errorf("retryingsink: error writing events: %v, retrying", err) - goto retry - } - - return nil -} - -// Close closes the sink and the underlying sink. -func (rs *retryingSink) Close() error { - rs.mu.Lock() - defer rs.mu.Unlock() - - if rs.closed { - return fmt.Errorf("retryingsink: already closed") - } - - rs.closed = true - return rs.sink.Close() -} - -// write provides a helper that dispatches failure and success properly. Used -// by write as the single-flight write call. -func (rs *retryingSink) write(events ...Event) error { - if err := rs.sink.Write(events...); err != nil { - rs.failure() - return err - } - - rs.reset() - return nil -} - -// wait backoff time against the sink, unlocking so others can proceed. Should -// only be called by methods that currently have the mutex. -func (rs *retryingSink) wait(backoff time.Duration) { - rs.mu.Unlock() - defer rs.mu.Lock() - - // backoff here - time.Sleep(backoff) -} - -// reset marks a successful call. -func (rs *retryingSink) reset() { - rs.failures.recent = 0 - rs.failures.last = time.Time{} -} - -// failure records a failure. -func (rs *retryingSink) failure() { - rs.failures.recent++ - rs.failures.last = time.Now().UTC() -} - -// proceed returns true if the call should proceed based on circuit breaker -// heuristics. -func (rs *retryingSink) proceed() bool { - return rs.failures.recent < rs.failures.threshold || - time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) -} diff --git a/vendor/github.com/docker/distribution/notifications/sinks_test.go b/vendor/github.com/docker/distribution/notifications/sinks_test.go deleted file mode 100644 index 89756a99..00000000 --- a/vendor/github.com/docker/distribution/notifications/sinks_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package notifications - -import ( - "fmt" - "math/rand" - "sync" - "time" - - "github.com/Sirupsen/logrus" - - "testing" -) - -func TestBroadcaster(t *testing.T) { - const nEvents = 1000 - var sinks []Sink - - for i := 0; i < 10; i++ { - sinks = append(sinks, &testSink{}) - } - - b := NewBroadcaster(sinks...) 
- - var block []Event - var wg sync.WaitGroup - for i := 1; i <= nEvents; i++ { - block = append(block, createTestEvent("push", "library/test", "blob")) - - if i%10 == 0 && i > 0 { - wg.Add(1) - go func(block ...Event) { - if err := b.Write(block...); err != nil { - t.Fatalf("error writing block of length %d: %v", len(block), err) - } - wg.Done() - }(block...) - - block = nil - } - } - - wg.Wait() // Wait until writes complete - checkClose(t, b) - - // Iterate through the sinks and check that they all have the expected length. - for _, sink := range sinks { - ts := sink.(*testSink) - ts.mu.Lock() - defer ts.mu.Unlock() - - if len(ts.events) != nEvents { - t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) - } - - if !ts.closed { - t.Fatalf("sink should have been closed") - } - } - -} - -func TestEventQueue(t *testing.T) { - const nevents = 1000 - var ts testSink - metrics := newSafeMetrics() - eq := newEventQueue( - // delayed sink simulates destination slower than channel comms - &delayedSink{ - Sink: &ts, - delay: time.Millisecond * 1, - }, metrics.eventQueueListener()) - - var wg sync.WaitGroup - var block []Event - for i := 1; i <= nevents; i++ { - block = append(block, createTestEvent("push", "library/test", "blob")) - if i%10 == 0 && i > 0 { - wg.Add(1) - go func(block ...Event) { - if err := eq.Write(block...); err != nil { - t.Fatalf("error writing event block: %v", err) - } - wg.Done() - }(block...) - - block = nil - } - } - - wg.Wait() - checkClose(t, eq) - - ts.mu.Lock() - defer ts.mu.Unlock() - metrics.Lock() - defer metrics.Unlock() - - if len(ts.events) != nevents { - t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000) - } - - if !ts.closed { - t.Fatalf("sink should have been closed") - } - - if metrics.Events != nevents { - t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents) - } - - if metrics.Pending != 0 { - t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0) - } -} - -func TestRetryingSink(t *testing.T) { - - // Make a sink that fails most of the time, ensuring that all the events - // make it through. - var ts testSink - flaky := &flakySink{ - rate: 1.0, // start out always failing. - Sink: &ts, - } - s := newRetryingSink(flaky, 3, 10*time.Millisecond) - - var wg sync.WaitGroup - var block []Event - for i := 1; i <= 100; i++ { - block = append(block, createTestEvent("push", "library/test", "blob")) - - // Above 50, set the failure rate lower - if i > 50 { - s.mu.Lock() - flaky.rate = 0.90 - s.mu.Unlock() - } - - if i%10 == 0 && i > 0 { - wg.Add(1) - go func(block ...Event) { - defer wg.Done() - if err := s.Write(block...); err != nil { - t.Fatalf("error writing event block: %v", err) - } - }(block...) - - block = nil - } - } - - wg.Wait() - checkClose(t, s) - - ts.mu.Lock() - defer ts.mu.Unlock() - - if len(ts.events) != 100 { - t.Fatalf("events not propagated: %d != %d", len(ts.events), 100) - } -} - -type testSink struct { - events []Event - mu sync.Mutex - closed bool -} - -func (ts *testSink) Write(events ...Event) error { - ts.mu.Lock() - defer ts.mu.Unlock() - ts.events = append(ts.events, events...) - return nil -} - -func (ts *testSink) Close() error { - ts.mu.Lock() - defer ts.mu.Unlock() - ts.closed = true - - logrus.Infof("closing testSink") - return nil -} - -type delayedSink struct { - Sink - delay time.Duration -} - -func (ds *delayedSink) Write(events ...Event) error { - time.Sleep(ds.delay) - return ds.Sink.Write(events...)
-} - -type flakySink struct { - Sink - rate float64 -} - -func (fs *flakySink) Write(events ...Event) error { - if rand.Float64() < fs.rate { - return fmt.Errorf("error writing %d events", len(events)) - } - - return fs.Sink.Write(events...) -} - -func checkClose(t *testing.T, sink Sink) { - if err := sink.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - // second close should not crash but should return an error. - if err := sink.Close(); err == nil { - t.Fatalf("no error on double close") - } - - // Write after closed should be an error - if err := sink.Write([]Event{}...); err == nil { - t.Fatalf("write after closed did not have an error") - } else if err != ErrSinkClosed { - t.Fatalf("error should be ErrSinkClosed") - } -} diff --git a/vendor/github.com/docker/distribution/project/dev-image/Dockerfile b/vendor/github.com/docker/distribution/project/dev-image/Dockerfile deleted file mode 100644 index 1e2a8471..00000000 --- a/vendor/github.com/docker/distribution/project/dev-image/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM ubuntu:14.04 - -ENV GOLANG_VERSION 1.4rc1 -ENV GOPATH /var/cache/drone -ENV GOROOT /usr/local/go -ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin - -ENV LANG C -ENV LC_ALL C - -RUN apt-get update && apt-get install -y \ - wget ca-certificates git mercurial bzr \ - --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ - tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ - rm go${GOLANG_VERSION}.linux-amd64.tar.gz - -RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/vendor/github.com/docker/distribution/project/hooks/README.md b/vendor/github.com/docker/distribution/project/hooks/README.md deleted file mode 100644 index eda88696..00000000 --- a/vendor/github.com/docker/distribution/project/hooks/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Git Hooks -========= - -To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. - -As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh b/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh deleted file mode 100755 index 6afea8a1..00000000 --- a/vendor/github.com/docker/distribution/project/hooks/configure-hooks.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -cd $(dirname $0) - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - echo -e "Unable to resolve repository root. 
Error:\n$REPO_ROOT" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -set -e -set -x - -# Just in case the directory doesn't exist -mkdir -p $REPO_ROOT/.git/hooks - -ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/project/hooks/pre-commit b/vendor/github.com/docker/distribution/project/hooks/pre-commit deleted file mode 100755 index 3ee2e913..00000000 --- a/vendor/github.com/docker/distribution/project/hooks/pre-commit +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -cd $REPO_ROOT - -GOFMT_ERRORS=$(gofmt -s -l . 2>&1) -if [ -n "$GOFMT_ERRORS" ]; then - printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr - exit 1 -fi - -GOLINT_ERRORS=$(golint ./... 2>&1) -if [ -n "$GOLINT_ERRORS" ]; then - printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr - exit 1 -fi - -GOVET_ERRORS=$(go vet ./... 2>&1) -GOVET_STATUS=$? -if [ "$GOVET_STATUS" -ne "0" ]; then - printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr - exit $GOVET_STATUS -fi diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 00000000..3a5d36c2 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,341 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := repository [ ":" tag ] [ "@" digest ] +// +// // repository.go +// repository := hostname ['/' component]+ +// hostname := hostcomponent [':' port-number] +// component := subcomponent [separator subcomponent]* +// subcomponent := alpha-numeric ['-'* alpha-numeric]* +// hostcomponent := [hostpart '.']* hostpart +// alpha-numeric := /[a-z0-9]+/ +// separator := /([_.]|__)/ +// port-number := /[0-9]+/ +// hostpart := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ +// +// // tag.go +// tag := /[\w][\w.-]{0,127}/ +// +// // from the digest package +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +package reference + +import ( + "errors" + "fmt" + + "github.com/docker/distribution/digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than + // NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// by which it can be referenced +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name, including hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +func SplitHostname(named Named) (string, string) { + name := named.Name() + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return "", name + } + return match[1], match[2] } + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests.
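+// +// A sketch of typical use (editor's illustration; the input is a made-up example): +// +// ref, err := Parse("example.com:5000/repo:tag") +// if err != nil { +// // handle ErrReferenceInvalidFormat, ErrNameTooLong, ... +// } +// if named, ok := ref.(Named); ok { +// hostname, name := SplitHostname(named) // "example.com:5000", "repo" +// _, _ = hostname, name +// }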
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + // TODO(dmcgowan): Provide more specific and helpful error + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + ref := reference{ + name: matches[1], + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.ParseDigest(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. 
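+// +// For example (editor's sketch; dgst stands in for a valid digest.Digest value): +// +// named, _ := WithName("example.com/repo") +// canonical, _ := WithDigest(named, dgst) +// _ = canonical.String() // "example.com/repo@" + dgst.String()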
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +func getBestReferenceType(ref reference) Reference { + if ref.name == "" { + // Allow digest-only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + name: ref.name, + digest: ref.digest, + } + } + return repository(ref.name) + } + if ref.digest == "" { + return taggedReference{ + name: ref.name, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + name string + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.name + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Name() string { + return r.name +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository string + +func (r repository) String() string { + return string(r) +} + +func (r repository) Name() string { + return string(r) +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + // convert directly; calling d.String() here would recurse forever + return string(d) +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + name string + tag string +} + +func (t taggedReference) String() string { + return t.name + ":" + t.tag +} + +func (t taggedReference) Name() string { + return t.name +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + name string + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.name + "@" + c.digest.String() +} + +func (c canonicalReference) Name() string { + return c.name +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go new file mode 100644 index 00000000..8e1ac1f3 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference_test.go @@ -0,0 +1,531 @@ +package reference + +import ( + "encoding/json" + "strconv" + "strings" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestReferenceParse(t *testing.T) { + // referenceTestcases is a unified set of testcases for + // testing the parsing of references + referenceTestcases := []struct { + // input is the repository name or name component testcase + input string + // err is the error expected from Parse, or nil + err error + // repository is the string representation for the reference + repository string + // hostname is the hostname expected in the reference + hostname string + // tag is the tag for the reference + tag string + // digest is the digest for the reference (enforces digest reference) + digest string + }{ + { + input: "test_com", + repository: "test_com", + }, + { + input: "test.com:tag", + repository: "test.com", + tag: "tag", + }, + { + input: "test.com:5000", + repository: "test.com", + tag: "5000", + }, + { + input: "test.com/repo:tag", + hostname: "test.com", + repository: "test.com/repo", + tag: "tag", + }, + { + input: "test:5000/repo", + hostname: "test:5000", + repository: "test:5000/repo", + }, + { + input: "test:5000/repo:tag", + hostname: "test:5000", + repository: "test:5000/repo", + tag: "tag", + }, + { +
input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + hostname: "test:5000", + repository: "test:5000/repo", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + hostname: "test:5000", + repository: "test:5000/repo", + tag: "tag", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo", + hostname: "test:5000", + repository: "test:5000/repo", + }, + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestUnsupported, + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", + hostname: "a", + repository: strings.Repeat("a/", 127) + "a", + tag: "tag-puts-this-over-max", + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + }, + { + input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + tag: "some-long-tag", + }, + { + input: "b.gcr.io/test.example.com/my-app:test.example.com", + hostname: "b.gcr.io", + repository: "b.gcr.io/test.example.com/my-app", + tag: "test.example.com", + }, + { + input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode + hostname: "xn--n3h.com", + repository: "xn--n3h.com/myimage", + tag: "xn--n3h.com", + }, + { + input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode + hostname: "xn--7o8h.com", + repository: "xn--7o8h.com/myimage", + tag: "xn--7o8h.com", + digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "foo_bar.com:8080", + repository: "foo_bar.com", + tag: "8080", + }, + { + input: "foo/foo_bar.com:8080", + hostname: "foo", + repository: "foo/foo_bar.com", + tag: "8080", + }, + } + for _, testcase := range referenceTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + repo, err := Parse(testcase.input) + if testcase.err != nil { + if err == nil { + failf("missing expected error: %v", testcase.err) + } else if testcase.err != err { + failf("mismatched error: got %v, expected %v", err, testcase.err) + } + continue + } else if err != nil { + failf("unexpected parse error: %v", err) + continue + } + if repo.String() != testcase.input { + failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) + } + + if named, ok := repo.(Named); ok { + if named.Name() != testcase.repository { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) + } + hostname, _ := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + } else if testcase.repository != "" || testcase.hostname != "" { + failf("expected named type, got %T", repo) + } + + tagged, ok := repo.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", repo) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := repo.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", repo) + } + } else if ok { + failf("unexpected digested type") + } + + } +} + +// TestWithNameFailure tests cases where WithName should fail. Cases where it +// should succeed are covered by TestSplitHostname, below. +func TestWithNameFailure(t *testing.T) { + testcases := []struct { + input string + err error + }{ + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + _, err := WithName(testcase.input) + if err == nil { + failf("no error parsing name. expected: %s", testcase.err) + } + } +} + +func TestSplitHostname(t *testing.T) { + testcases := []struct { + input string + hostname string + name string + }{ + { + input: "test.com/foo", + hostname: "test.com", + name: "foo", + }, + { + input: "test_com/foo", + hostname: "", + name: "test_com/foo", + }, + { + input: "test:8080/foo", + hostname: "test:8080", + name: "foo", + }, + { + input: "test.com:8080/foo", + hostname: "test.com:8080", + name: "foo", + }, + { + input: "test-com:8080/foo", + hostname: "test-com:8080", + name: "foo", + }, + { + input: "xn--n3h.com:18080/foo", + hostname: "xn--n3h.com:18080", + name: "foo", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + named, err := WithName(testcase.input) + if err != nil { + failf("error parsing name: %s", err) + } + hostname, name := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + if name != testcase.name { + failf("unexpected name: got %q, expected %q", name, testcase.name) + } + } +} + +type serializationType struct { + Description string + Field Field +} + +func TestSerialization(t *testing.T) { + testcases := []struct { + description string + input string + name string + tag string + digest string + err error + }{ + { + description: "empty value", + err: ErrNameEmpty, + }, + { + description: "just a name", + input: "example.com:8000/named", + name: "example.com:8000/named", + }, + { + description: "name with a tag", + input: "example.com:8000/named:tagged", + name: "example.com:8000/named", + tag: "tagged", + }, + { + description: "name with digest", + input: "other.com/named@sha256:1234567890098765432112345667890098765", + name: "other.com/named", + digest: "sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + m := map[string]string{ + "Description": testcase.description, + "Field": testcase.input, + } + b, err := json.Marshal(m) + if err != nil { + failf("error marshalling: %v", err) + } + t := serializationType{} + + if err := json.Unmarshal(b, &t); err != nil { + if testcase.err == nil { + failf("error unmarshalling: %v", err) + } + if err != testcase.err { + failf("wrong error, expected %v, got %v", testcase.err, err) + } + + continue + } else if testcase.err != nil { + failf("expected error unmarshalling: %v", testcase.err) + } + + if t.Description != testcase.description { + failf("wrong description, expected %q, got %q", testcase.description, t.Description) + } + + ref := t.Field.Reference() + + if named, ok := ref.(Named); ok { + if named.Name() != testcase.name { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) + } + } else if testcase.name != "" { + failf("expected named type, got %T", ref) + } + + tagged, ok := ref.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", ref) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := ref.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", ref) + } + } else if ok { + failf("unexpected digested type") + } + + t = serializationType{ + Description: testcase.description, + Field: AsField(ref), + } + + b2, err := json.Marshal(t) + if err != nil { + failf("error marshaling serialization type: %v", err) + } + + if string(b) != string(b2) { + failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) + } + + // Ensure t.Field is not implementing "Reference" directly, getting + // around the Reference type system + var fieldInterface interface{} = t.Field + if _, ok := fieldInterface.(Reference); ok { + failf("field should not implement Reference interface") + } + + } +} + +func TestWithTag(t *testing.T) { + testcases := []struct { + name
string + tag string + combined string + }{ + { + name: "test.com/foo", + tag: "tag", + combined: "test.com/foo:tag", + }, + { + name: "foo", + tag: "tag2", + combined: "foo:tag2", + }, + { + name: "test.com:8000/foo", + tag: "tag4", + combined: "test.com:8000/foo:tag4", + }, + { + name: "test.com:8000/foo", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("WithTag failed: %s", err) + } + if tagged.String() != testcase.combined { + failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) + } + } +} + +func TestWithDigest(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + combined string + }{ + { + name: "test.com/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + digested, err := WithDigest(named, testcase.digest) + if err != nil { + failf("WithDigest failed: %s", err) + } + if digested.String() != testcase.combined { + failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) + } + } +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 00000000..06ca8db3 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,49 @@ +package reference + +import "regexp" + +var ( + // nameSubComponentRegexp defines the part of the name which must + // begin and end with an alphanumeric character. These characters can + // be separated by any number of dashes. + nameSubComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[-]+[a-z0-9]+)*`) + + // nameComponentRegexp restricts registry path component names to + // start with at least one letter or number, with following parts able to + // be separated by one period, underscore or double underscore. + nameComponentRegexp = regexp.MustCompile(nameSubComponentRegexp.String() + `(?:(?:[._]|__)` + nameSubComponentRegexp.String() + `)*`) + + nameRegexp = regexp.MustCompile(`(?:` + nameComponentRegexp.String() + `/)*` + nameComponentRegexp.String()) + + hostnameComponentRegexp = regexp.MustCompile(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) + + // hostnameRegexp restricts the registry hostname component of a repository name to + // start with a component as defined by hostnameComponentRegexp and followed by an optional port. + hostnameRegexp = regexp.MustCompile(`(?:` + hostnameComponentRegexp.String() + `\.)*` + hostnameComponentRegexp.String() + `(?::[0-9]+)?`) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = regexp.MustCompile(`^` + TagRegexp.String() + `$`) + + // DigestRegexp matches valid digests. + DigestRegexp = regexp.MustCompile(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the hostname and name part omitting + // the separating forward slash from either. + NameRegexp = regexp.MustCompile(`(?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String()) + + // ReferenceRegexp is the full supported format of a reference. The + // regexp has capturing groups for name, tag, and digest components. + ReferenceRegexp = regexp.MustCompile(`^((?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String() + `)(?:[:](` + TagRegexp.String() + `))?(?:[@](` + DigestRegexp.String() + `))?$`) + + // anchoredNameRegexp is used to parse a name value, capturing hostname + anchoredNameRegexp = regexp.MustCompile(`^(?:(` + hostnameRegexp.String() + `)/)?(` + nameRegexp.String() + `)$`) +) diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go new file mode 100644 index 00000000..530a6eb6 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp_test.go @@ -0,0 +1,467 @@ +package reference + +import ( + "regexp" + "strings" + "testing" +) + +type regexpMatch struct { + input string + match bool + subs []string +} + +func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { + matches := r.FindStringSubmatch(m.input) + if m.match && matches != nil { + if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { + t.Fatalf("Bad match result %#v for %q", matches, m.input) + } + if len(matches) < (len(m.subs) + 1) { + t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) + } + for i := range m.subs { + if m.subs[i] != matches[i+1] { + t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) + } + } + } else if m.match { + t.Errorf("Expected match for %q", m.input) + } else if matches != nil { + t.Errorf("Unexpected match for %q", m.input) + } +} + +func TestHostRegexp(t *testing.T) { + hostcases := []regexpMatch{ + { + input: "test.com", + match: true, + }, + { + input: "test.com:10304", + match: true, + }, + { + input: "test.com:http", + match: false, + }, + { + input: "localhost", + match: true, + }, + { + input: "localhost:8080", + match: true, + }, + { + input: "a", + match: true, + }, + { + input: "a.b", + match: true, + }, + { + input: "ab.cd.com", + match: true, + }, + { + input: "a-b.com", + match: true, + }, + { + input: "-ab.com", + match: false, + }, + { + input: "ab-.com", + match: false, + }, + { + input: "ab.c-om", + match: true, + }, + { + input: "ab.-com", + match: false, + }, + { + input: "ab.com-", + match: false, + }, + { + input: "0101.com", + match: true, // TODO(dmcgowan): valid if this should be allowed + }, + { + input: "001a.com", + match: true, + }, + { + input: "b.gbc.io:443", + match: true, + }, + { + input: "b.gbc.io", + match: true, + }, + { + input: "xn--n3h.com", // ☃.com in punycode +
match: true, + }, + } + r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) + for i := range hostcases { + checkRegexp(t, r, hostcases[i]) + } +} + +func TestFullNameRegexp(t *testing.T) { + testcases := []regexpMatch{ + { + input: "", + match: false, + }, + { + input: "short", + match: true, + subs: []string{"", "short"}, + }, + { + input: "simple/name", + match: true, + subs: []string{"simple", "name"}, + }, + { + input: "library/ubuntu", + match: true, + subs: []string{"library", "ubuntu"}, + }, + { + input: "docker/stevvooe/app", + match: true, + subs: []string{"docker", "stevvooe/app"}, + }, + { + input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, + }, + { + input: "aa/aa/bb/bb/bb", + match: true, + subs: []string{"aa", "aa/bb/bb/bb"}, + }, + { + input: "a/a/a/a", + match: true, + subs: []string{"a", "a/a/a"}, + }, + { + input: "a/a/a/a/", + match: false, + }, + { + input: "a//a/a", + match: false, + }, + { + input: "a", + match: true, + subs: []string{"", "a"}, + }, + { + input: "a/aa", + match: true, + subs: []string{"a", "aa"}, + }, + { + input: "a/aa/a", + match: true, + subs: []string{"a", "aa/a"}, + }, + { + input: "foo.com", + match: true, + subs: []string{"", "foo.com"}, + }, + { + input: "foo.com/", + match: false, + }, + { + input: "foo.com:8080/bar", + match: true, + subs: []string{"foo.com:8080", "bar"}, + }, + { + input: "foo.com:http/bar", + match: false, + }, + { + input: "foo.com/bar", + match: true, + subs: []string{"foo.com", "bar"}, + }, + { + input: "foo.com/bar/baz", + match: true, + subs: []string{"foo.com", "bar/baz"}, + }, + { + input: "localhost:8080/bar", + match: true, + subs: []string{"localhost:8080", "bar"}, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + match: true, + subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, + }, + { + input: "blog.foo.com/bar/baz", + match: true, + subs: []string{"blog.foo.com", "bar/baz"}, + }, + { + input: "a^a", + match: false, + }, + { + input: "aa/asdf$$^/aa", + match: false, + }, + { + input: "asdf$$^/aa", + match: false, + }, + { + input: "aa-a/a", + match: true, + subs: []string{"aa-a", "a"}, + }, + { + input: strings.Repeat("a/", 128) + "a", + match: true, + subs: []string{"a", strings.Repeat("a/", 127) + "a"}, + }, + { + input: "a-/a/a/a", + match: false, + }, + { + input: "foo.com/a-/a/a", + match: false, + }, + { + input: "-foo/bar", + match: false, + }, + { + input: "foo/bar-", + match: false, + }, + { + input: "foo-/bar", + match: false, + }, + { + input: "foo/-bar", + match: false, + }, + { + input: "_foo/bar", + match: false, + }, + { + input: "foo_bar", + match: true, + subs: []string{"", "foo_bar"}, + }, + { + input: "foo_bar.com", + match: true, + subs: []string{"", "foo_bar.com"}, + }, + { + input: "foo_bar.com:8080", + match: false, + }, + { + input: "foo_bar.com:8080/app", + match: false, + }, + { + input: "foo.com/foo_bar", + match: true, + subs: []string{"foo.com", "foo_bar"}, + }, + { + input: "____/____", + match: false, + }, + { + input: "_docker/_docker", + match: false, + }, + { + input: "docker_/docker_", + match: false, + }, + { + input: "b.gcr.io/test.example.com/my-app", + match: true, + subs: []string{"b.gcr.io", "test.example.com/my-app"}, + }, + { + input: "xn--n3h.com/myimage", // ☃.com in punycode + match: true, + subs: []string{"xn--n3h.com", "myimage"}, + }, + { + input: "xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"xn--7o8h.com", "myimage"}, + }, + { + 
input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode + match: true, + subs: []string{"example.com", "xn--7o8h.com/myimage"}, + }, + { + input: "example.com/some_separator__underscore/myimage", + match: true, + subs: []string{"example.com", "some_separator__underscore/myimage"}, + }, + { + input: "example.com/__underscore/myimage", + match: false, + }, + { + input: "example.com/..dots/myimage", + match: false, + }, + { + input: "example.com/.dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + match: false, + }, + { + input: "example.com/nodouble..dots/myimage", + match: false, + }, + { + input: "docker./docker", + match: false, + }, + { + input: ".docker/docker", + match: false, + }, + { + input: "docker-/docker", + match: false, + }, + { + input: "-docker/docker", + match: false, + }, + { + input: "do..cker/docker", + match: false, + }, + { + input: "do__cker:8080/docker", + match: false, + }, + { + input: "do__cker/docker", + match: true, + subs: []string{"", "do__cker/docker"}, + }, + { + input: "b.gcr.io/test.example.com/my-app", + match: true, + subs: []string{"b.gcr.io", "test.example.com/my-app"}, + }, + { + input: "registry.io/foo/project--id.module--name.ver---sion--name", + match: true, + subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, + }, + } + for i := range testcases { + checkRegexp(t, anchoredNameRegexp, testcases[i]) + } +} + +func TestReferenceRegexp(t *testing.T) { + testcases := []regexpMatch{ + { + input: "registry.com:8080/myapp:tag", + match: true, + subs: []string{"registry.com:8080/myapp", "tag", ""}, + }, + { + input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp@sha256:badbadbadbad", + match: false, + }, + { + input: "registry.com:8080/myapp:invalid~tag", + match: false, + }, + { + input: "bad_hostname.com:8080/myapp:tag", + match: false, + }, + { + input:// localhost treated as name, missing tag with 8080 as tag + "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: false, + }, + { + // localhost will be treated as an image name without a host + input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", + match: true, + subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, + }, + { + input: "registry.com:8080/myapp@bad", + match: false, + }, + { + input: "registry.com:8080/myapp@2bad", + match: false, // TODO(dmcgowan): Support this as valid + }, + } + + for i := range testcases { + 
checkRegexp(t, ReferenceRegexp, testcases[i]) + } + +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index 1a3de01d..001776f8 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -3,7 +3,7 @@ package distribution import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" ) // Scope defines the set of items that match a namespace. @@ -76,13 +76,13 @@ type ManifestService interface { Exists(dgst digest.Digest) (bool, error) // Get retrieves the manifest identified by the digest, if it exists. - Get(dgst digest.Digest) (*manifest.SignedManifest, error) + Get(dgst digest.Digest) (*schema1.SignedManifest, error) // Delete removes the manifest, if it exists. Delete(dgst digest.Digest) error // Put creates or updates the manifest. - Put(manifest *manifest.SignedManifest) error + Put(manifest *schema1.SignedManifest) error // TODO(stevvooe): The methods after this message should be moved to a // discrete TagService, per active proposals. @@ -94,7 +94,7 @@ type ManifestService interface { ExistsByTag(tag string) (bool, error) // GetByTag retrieves the named manifest, if it exists. - GetByTag(tag string, options ...ManifestServiceOption) (*manifest.SignedManifest, error) + GetByTag(tag string, options ...ManifestServiceOption) (*schema1.SignedManifest, error) // TODO(stevvooe): There are several changes that need to be done to this // interface: diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go index 42f911b3..01c34384 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -13,15 +13,57 @@ var ( groupToDescriptors = map[string][]ErrorDescriptor{} ) -// ErrorCodeUnknown is a generic error that can be used as a last -// resort if there is no situation-specific error message that can be used -var ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an API classification.`, - HTTPStatusCode: http.StatusInternalServerError, -}) + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client.
Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) +) var nextCode = 1000 var registerLock sync.Mutex diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go index 0ef64f88..7eba362a 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -5,6 +5,7 @@ import ( "regexp" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" ) @@ -12,7 +13,7 @@ var ( nameParameterDescriptor = ParameterDescriptor{ Name: "name", Type: "string", - Format: RepositoryNameRegexp.String(), + Format: reference.NameRegexp.String(), Required: true, Description: `Name of the target repository.`, } @@ -20,7 +21,7 @@ var ( referenceParameterDescriptor = ParameterDescriptor{ Name: "reference", Type: "string", - Format: TagNameRegexp.String(), + Format: reference.TagRegexp.String(), Required: true, Description: `Tag or digest of the target manifest.`, } @@ -111,45 +112,67 @@ var ( }, } - unauthorizedResponse = ResponseDescriptor{ - Description: "The client does not have access to the repository.", + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", Headers: []ParameterDescriptor{ authChallengeHeader, { Name: "Content-Length", Type: "integer", - Description: "Length of the JSON error response body.", + Description: "Length of the JSON response body.", Format: "", }, }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", - Format: unauthorizedErrorsBody, + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, } - unauthorizedResponsePush = ResponseDescriptor{ - Description: "The client does not have access to push to the repository.", - StatusCode: http.StatusUnauthorized, + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", Headers: []ParameterDescriptor{ - authChallengeHeader, { Name: "Content-Length", Type: "integer", - Description: "Length of the JSON error response body.", + Description: "Length of the JSON response body.", Format: "", }, }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + ErrorCodeNameUnknown, + }, + } + +
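// (Editor's note, illustrative only: the route descriptors below list these shared values, e.g. Failures: []ResponseDescriptor{unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor}, in place of the old per-route unauthorized responses.) +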
deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", - Format: unauthorizedErrorsBody, + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, }, } ) @@ -345,7 +368,7 @@ var routeDescriptors = []RouteDescriptor{ Name: RouteNameBase, Path: "/v2/", Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization.`, + Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, Methods: []MethodDescriptor{ { Method: "GET", @@ -363,24 +386,11 @@ var routeDescriptors = []RouteDescriptor{ }, }, Failures: []ResponseDescriptor{ - { - Description: "The client is not authorized to access the registry.", - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, - }, { Description: "The registry does not implement the V2 API.", StatusCode: http.StatusNotFound, }, + unauthorizedResponseDescriptor, }, }, }, @@ -389,7 +399,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: RouteNameTags, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", Entity: "Tags", Description: "Retrieve information about tags.", Methods: []MethodDescriptor{ @@ -432,28 +442,9 @@ var routeDescriptors = []RouteDescriptor{ }, }, Failures: []ResponseDescriptor{ - { - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -487,28 +478,9 @@ var routeDescriptors = []RouteDescriptor{ }, }, Failures: []ResponseDescriptor{ - { - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -517,7 +489,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: RouteNameManifest, - Path: "/v2/{name:" + 
RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ @@ -560,29 +532,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, - }, - { - Description: "The named manifest is not known to the registry.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -637,17 +589,9 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have permission to push to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, { Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", @@ -671,22 +615,11 @@ var routeDescriptors = []RouteDescriptor{ }, }, { - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, + errcode.ErrorCodeUnsupported, }, }, }, @@ -725,25 +658,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - { - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, { Name: "Unknown Manifest", Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. 
Clients can assume the manifest was already deleted if this response is returned.", @@ -757,6 +674,14 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, }, }, }, @@ -766,7 +691,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlob, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ @@ -829,7 +754,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, @@ -842,6 +766,9 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -898,7 +825,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ @@ -914,6 +840,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -967,16 +896,19 @@ var routeDescriptors = []RouteDescriptor{ }, }, { - Description: "Delete is not enabled on the registry", + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", StatusCode: http.StatusMethodNotAllowed, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnsupported, + errcode.ErrorCodeUnsupported, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -990,7 +922,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUpload, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. 
This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ @@ -1050,7 +982,17 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeNameInvalid, }, }, - unauthorizedResponsePush, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -1094,7 +1036,9 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeNameInvalid, }, }, - unauthorizedResponsePush, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1104,7 +1048,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ @@ -1153,7 +1097,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1165,6 +1108,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1225,7 +1171,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponsePush, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1237,6 +1182,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -1304,7 +1252,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponsePush, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1320,6 +1267,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1389,13 +1339,13 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, - unauthorizedResponsePush, { Description: "The upload is unknown to the registry. 
The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1407,6 +1357,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1449,7 +1402,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, @@ -1461,6 +1413,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go index 87e27f2e..ece52a2c 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go @@ -9,24 +9,6 @@ import ( const errGroup = "registry.api.v2" var ( - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - }) - - // ErrorCodeUnauthorized is returned if a request is not authorized. - ErrorCodeUnauthorized = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "access to the requested resource is not authorized", - Description: `The access controller denied access for the operation on - a resource. Often this will be accompanied by a 401 Unauthorized - response status.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ diff --git a/vendor/github.com/docker/distribution/registry/api/v2/names.go b/vendor/github.com/docker/distribution/registry/api/v2/names.go deleted file mode 100644 index 14b7ea60..00000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/names.go +++ /dev/null @@ -1,83 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" -) - -// TODO(stevvooe): Move these definitions to the future "reference" package. -// While they are used with v2 definitions, their relevance expands beyond. - -const ( - // RepositoryNameTotalLengthMax is the maximum total number of characters in - // a repository name - RepositoryNameTotalLengthMax = 255 -) - -// RepositoryNameComponentRegexp restricts registry path component names to -// start with at least one letter or number, with following parts able to -// be separated by one period, dash or underscore. -var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) - -// RepositoryNameComponentAnchoredRegexp is the version of -// RepositoryNameComponentRegexp which must completely match the content -var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) - -// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow -// multiple path components, separated by a forward slash. 
-var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/)*` + RepositoryNameComponentRegexp.String()) - -// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. -var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) - -// TagNameAnchoredRegexp matches valid tag names, anchored at the start and -// end of the matched string. -var TagNameAnchoredRegexp = regexp.MustCompile("^" + TagNameRegexp.String() + "$") - -var ( - // ErrRepositoryNameEmpty is returned for empty, invalid repository names. - ErrRepositoryNameEmpty = fmt.Errorf("repository name must have at least one component") - - // ErrRepositoryNameLong is returned when a repository name is longer than - // RepositoryNameTotalLengthMax - ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) - - // ErrRepositoryNameComponentInvalid is returned when a repository name does - // not match RepositoryNameComponentRegexp - ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) -) - -// ValidateRepositoryName ensures the repository name is valid for use in the -// registry. This function accepts a superset of what might be accepted by -// docker core or docker hub. If the name does not pass validation, an error, -// describing the conditions, is returned. -// -// Effectively, the name should comply with the following grammar: -// -// alpha-numeric := /[a-z0-9]+/ -// separator := /[._-]/ -// component := alpha-numeric [separator alpha-numeric]* -// namespace := component ['/' component]* -// -// The result of the production, known as the "namespace", should be limited -// to 255 characters. -func ValidateRepositoryName(name string) error { - if name == "" { - return ErrRepositoryNameEmpty - } - - if len(name) > RepositoryNameTotalLengthMax { - return ErrRepositoryNameLong - } - - components := strings.Split(name, "/") - - for _, component := range components { - if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { - return ErrRepositoryNameComponentInvalid - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/names_test.go b/vendor/github.com/docker/distribution/registry/api/v2/names_test.go deleted file mode 100644 index 656ae846..00000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/names_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package v2 - -import ( - "strconv" - "strings" - "testing" -) - -var ( - // regexpTestcases is a unified set of testcases for - // TestValidateRepositoryName and TestRepositoryNameRegexp. - // Some of them are valid inputs for one and not the other. 
- regexpTestcases = []struct { - // input is the repository name or name component testcase - input string - // err is the error expected from ValidateRepositoryName, or nil - err error - // invalid should be true if the testcase is *not* expected to - // match RepositoryNameRegexp - invalid bool - }{ - { - input: "", - err: ErrRepositoryNameEmpty, - }, - { - input: "short", - }, - { - input: "simple/name", - }, - { - input: "library/ubuntu", - }, - { - input: "docker/stevvooe/app", - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - }, - { - input: "aa/aa/bb/bb/bb", - }, - { - input: "a/a/a/b/b", - }, - { - input: "a/a/a/a/", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "a//a/a", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "a", - }, - { - input: "a/aa", - }, - { - input: "aa/a", - }, - { - input: "a/aa/a", - }, - { - input: "foo.com/", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - // TODO: this testcase should be valid once we switch to - // the reference package. - input: "foo.com:8080/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo.com/bar", - }, - { - input: "foo.com/bar/baz", - }, - { - input: "foo.com/bar/baz/quux", - }, - { - input: "blog.foo.com/bar/baz", - }, - { - input: "asdf", - }, - { - input: "asdf$$^/aa", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "aa-a/aa", - }, - { - input: "aa/aa", - }, - { - input: "a-a/a-a", - }, - { - input: "a-/a/a/a", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: strings.Repeat("a", 255), - }, - { - input: strings.Repeat("a", 256), - err: ErrRepositoryNameLong, - }, - { - input: "-foo/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo/bar-", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo-/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo/-bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "_foo/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo/bar_", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "____/____", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "_docker/_docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "docker_/docker_", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "b.gcr.io/test.example.com/my-app", // embedded domain component - }, - // TODO(stevvooe): The following is a punycode domain name that we may - // want to allow in the future. Currently, this is not allowed but we - // may want to change this in the future. Adding this here as invalid - // for the time being. - { - input: "xn--n3h.com/myimage", // http://☃.com in punycode - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "xn--7o8h.com/myimage", // http://🐳.com in punycode - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - } -) - -// TestValidateRepositoryName tests the ValidateRepositoryName function, -// which uses RepositoryNameComponentAnchoredRegexp for validation -func TestValidateRepositoryName(t *testing.T) { - for _, testcase := range regexpTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - if err := ValidateRepositoryName(testcase.input); err != testcase.err { - if testcase.err != nil { - if err != nil { - failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) - } else { - failf("expected invalid repository: %v", testcase.err) - } - } else { - if err != nil { - // Wrong error returned. - failf("unexpected error validating repository name: %v, expected %v", err, testcase.err) - } else { - failf("unexpected error validating repository name: %v", err) - } - } - } - } -} - -func TestRepositoryNameRegexp(t *testing.T) { - for _, testcase := range regexpTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - matches := RepositoryNameRegexp.FindString(testcase.input) == testcase.input - if matches == testcase.invalid { - if testcase.invalid { - failf("expected invalid repository name %s", testcase.input) - } else { - failf("expected valid repository name %s", testcase.input) - } - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go index b8d724df..f6379977 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -170,6 +170,14 @@ func TestRouter(t *testing.T) { "name": "foo/bar/manifests", }, }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", + Vars: map[string]string{ + "name": "locahost:8080/foo/bar/baz", + "reference": "tag", + }, + }, } checkTestRouter(t, testCases, "", true) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go index 1113a7dd..61d41547 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -158,8 +158,9 @@ func TestBuilderFromRequest(t *testing.T) { forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") testRequests := []struct { - request *http.Request - base string + request *http.Request + base string + configHost url.URL }{ { request: &http.Request{URL: u, Host: u.Host}, @@ -177,10 +178,23 @@ func TestBuilderFromRequest(t *testing.T) { request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, base: "http://first.example.com", }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, + base: "https://third.example.com:5000", + configHost: url.URL{ + Scheme: "https", + Host: "third.example.com:5000", + }, + }, } for _, tr := range testRequests { - builder := NewURLBuilderFromRequest(tr.request) + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost) + } else { + builder = NewURLBuilderFromRequest(tr.request) + } for _, testCase := range makeURLBuilderTestCases(builder) { url, err := testCase.build() @@ -207,8 +221,9 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { forwardedProtoHeader.Set("X-Forwarded-Proto", "https") testRequests := []struct { - request *http.Request - base string + request *http.Request + base string + configHost url.URL }{ { request: &http.Request{URL: u, Host: u.Host}, @@ -218,10 +233,23 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { request: &http.Request{URL: u, Host: u.Host, Header: 
forwardedProtoHeader}, base: "https://example.com/prefix/", }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://subdomain.example.com/prefix/", + configHost: url.URL{ + Scheme: "https", + Host: "subdomain.example.com/prefix", + }, + }, } for _, tr := range testRequests { - builder := NewURLBuilderFromRequest(tr.request) + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost) + } else { + builder = NewURLBuilderFromRequest(tr.request) + } for _, testCase := range makeURLBuilderTestCases(builder) { url, err := testCase.build() diff --git a/vendor/github.com/docker/distribution/registry/auth/auth.go b/vendor/github.com/docker/distribution/registry/auth/auth.go deleted file mode 100644 index 862c8d28..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/auth.go +++ /dev/null @@ -1,142 +0,0 @@ -// Package auth defines a standard interface for request access controllers. -// -// An access controller has a simple interface with a single `Authorized` -// method which checks that a given request is authorized to perform one or -// more actions on one or more resources. This method should return a non-nil -// error if the request is not authorized. -// -// An implementation registers its access controller by name with a constructor -// which accepts an options map for configuring the access controller. -// -// options := map[string]interface{}{"sillySecret": "whysosilly?"} -// accessController, _ := auth.GetAccessController("silly", options) -// -// This `accessController` can then be used in a request handler like so: -// -// func updateOrder(w http.ResponseWriter, r *http.Request) { -// orderNumber := r.FormValue("orderNumber") -// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} -// access := auth.Access{Resource: resource, Action: "update"} -// -// if ctx, err := accessController.Authorized(ctx, access); err != nil { -// if challenge, ok := err.(auth.Challenge) { -// // Let the challenge write the response. -// challenge.ServeHTTP(w, r) -// } else { -// // Some other error. -// } -// } -// } -// -package auth - -import ( - "fmt" - "net/http" - - "github.com/docker/distribution/context" -) - -// UserInfo carries information about -// an authenticated/authorized client. -type UserInfo struct { - Name string -} - -// Resource describes a resource by type and name. -type Resource struct { - Type string - Name string -} - -// Access describes a specific action that is -// requested or allowed for a given resource. -type Access struct { - Resource - Action string -} - -// Challenge is a special error type which is used for HTTP 401 Unauthorized -// responses and is able to write the response with WWW-Authenticate challenge -// header values based on the error. -type Challenge interface { - error - - // SetHeaders prepares the request to conduct a challenge response by - // adding an HTTP challenge header on the response message. Callers - // are expected to set the appropriate HTTP status code (e.g. 401) - // themselves. - SetHeaders(w http.ResponseWriter) -} - -// AccessController controls access to registry resources based on a request -// and required access levels for a request. Implementations can support both -// complete denial and http authorization challenges. -type AccessController interface { - // Authorized returns a non-nil error if the context is not granted access and - // returns a new authorized context.
If one or more Access structs are - // provided, the requested access will be compared with what is available - // to the context. The given context will contain a "http.request" key with - // a `*http.Request` value. If the error is non-nil, access should always - // be denied. The error may be of type Challenge, in which case the caller - // may have the Challenge handle the request or choose what action to take - // based on the Challenge header or response status. The returned context - // object should have a "auth.user" value set to a UserInfo struct. - Authorized(ctx context.Context, access ...Access) (context.Context, error) -} - -// WithUser returns a context with the authorized user info. -func WithUser(ctx context.Context, user UserInfo) context.Context { - return userInfoContext{ - Context: ctx, - user: user, - } -} - -type userInfoContext struct { - context.Context - user UserInfo -} - -func (uic userInfoContext) Value(key interface{}) interface{} { - switch key { - case "auth.user": - return uic.user - case "auth.user.name": - return uic.user.Name - } - - return uic.Context.Value(key) -} - -// InitFunc is the type of an AccessController factory function and is used -// to register the constructor for different AccesController backends. -type InitFunc func(options map[string]interface{}) (AccessController, error) - -var accessControllers map[string]InitFunc - -func init() { - accessControllers = make(map[string]InitFunc) -} - -// Register is used to register an InitFunc for -// an AccessController backend with the given name. -func Register(name string, initFunc InitFunc) error { - if _, exists := accessControllers[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - accessControllers[name] = initFunc - - return nil -} - -// GetAccessController constructs an AccessController -// with the given options using the named backend. -func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { - if initFunc, exists := accessControllers[name]; exists { - return initFunc(options) - } - - return nil, fmt.Errorf("no access controller registered with name: %s", name) -} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go deleted file mode 100644 index 5ac3d84a..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access.go +++ /dev/null @@ -1,102 +0,0 @@ -// Package htpasswd provides a simple authentication scheme that checks for the -// user credential hash in an htpasswd formatted file in a configuration-determined -// location. -// -// This authentication method MUST be used under TLS, as simple token-replay attack is possible. -package htpasswd - -import ( - "errors" - "fmt" - "net/http" - "os" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -var ( - // ErrInvalidCredential is returned when the auth token does not authenticate correctly. - ErrInvalidCredential = errors.New("invalid authorization credential") - - // ErrAuthenticationFailure returned when authentication failure to be presented to agent. 
- ErrAuthenticationFailure = errors.New("authentication failure") -) - -type accessController struct { - realm string - htpasswd *htpasswd -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) - } - - path, present := options["path"] - if _, ok := path.(string); !present || !ok { - return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) - } - - f, err := os.Open(path.(string)) - if err != nil { - return nil, err - } - defer f.Close() - - h, err := newHTPasswd(f) - if err != nil { - return nil, err - } - - return &accessController{realm: realm.(string), htpasswd: h}, nil -} - -func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - username, password, ok := req.BasicAuth() - if !ok { - return nil, &challenge{ - realm: ac.realm, - err: ErrInvalidCredential, - } - } - - if err := ac.htpasswd.authenticateUser(username, password); err != nil { - context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) - return nil, &challenge{ - realm: ac.realm, - err: ErrAuthenticationFailure, - } - } - - return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil -} - -// challenge implements the auth.Challenge interface. -type challenge struct { - realm string - err error -} - -var _ auth.Challenge = challenge{} - -// SetHeaders sets the basic challenge header on the response. -func (ch challenge) SetHeaders(w http.ResponseWriter) { - w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) -} - -func (ch challenge) Error() string { - return fmt.Sprintf("basic authentication challenge: %#v", ch) -} - -func init() { - auth.Register("htpasswd", auth.InitFunc(newAccessController)) -} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go deleted file mode 100644 index db040547..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/htpasswd/access_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package htpasswd - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -func TestBasicAccessController(t *testing.T) { - testRealm := "The-Shire" - testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} - testPasswords := []string{"baggins", "baggins", "새주", "공주님"} - testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= - frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W - MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 - DeokMan:공주님` - - tempFile, err := ioutil.TempFile("", "htpasswd-test") - if err != nil { - t.Fatal("could not create temporary htpasswd file") - } - if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { - t.Fatal("could not write temporary htpasswd file") - } - - options := map[string]interface{}{ - "realm": testRealm, - "path": tempFile.Name(), - } - ctx := context.Background() - - accessController, err := newAccessController(options) - if err != nil { - t.Fatal("error creating access controller") - } - - tempFile.Close() - - var userNumber = 0 -
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithRequest(ctx, r) - authCtx, err := accessController.Authorized(ctx) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - err.SetHeaders(w) - w.WriteHeader(http.StatusUnauthorized) - return - default: - t.Fatalf("unexpected error authorizing request: %v", err) - } - } - - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) - if !ok { - t.Fatal("basic accessController did not set auth.user context") - } - - if userInfo.Name != testUsers[userNumber] { - t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) - } - - w.WriteHeader(http.StatusNoContent) - })) - - client := &http.Client{ - CheckRedirect: nil, - } - - req, _ := http.NewRequest("GET", server.URL, nil) - resp, err := client.Do(req) - - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) - } - - nonbcrypt := map[string]struct{}{ - "bilbo": {}, - "DeokMan": {}, - } - - for i := 0; i < len(testUsers); i++ { - userNumber = i - req, err := http.NewRequest("GET", server.URL, nil) - if err != nil { - t.Fatalf("error allocating new request: %v", err) - } - - req.SetBasicAuth(testUsers[i], testPasswords[i]) - - resp, err = client.Do(req) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - if _, ok := nonbcrypt[testUsers[i]]; ok { - // these are not allowed. - // Request should be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) - } - } else { - // Request should be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) - } - } - } - -} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go deleted file mode 100644 index 494ad0a7..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go +++ /dev/null @@ -1,80 +0,0 @@ -package htpasswd - -import ( - "bufio" - "fmt" - "io" - "strings" - - "golang.org/x/crypto/bcrypt" -) - -// htpasswd holds a path to a system .htpasswd file and the machinery to parse -// it. Only bcrypt hash entries are supported. -type htpasswd struct { - entries map[string][]byte // maps username to password byte slice. -} - -// newHTPasswd parses the reader and returns an htpasswd or an error. -func newHTPasswd(rd io.Reader) (*htpasswd, error) { - entries, err := parseHTPasswd(rd) - if err != nil { - return nil, err - } - - return &htpasswd{entries: entries}, nil -} - -// AuthenticateUser checks a given user:password credential against the -// receiving HTPasswd's file. If the check passes, nil is returned. 
-func (htpasswd *htpasswd) authenticateUser(username string, password string) error { - credentials, ok := htpasswd.entries[username] - if !ok { - // timing attack paranoia - bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) - - return ErrAuthenticationFailure - } - - err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) - if err != nil { - return ErrAuthenticationFailure - } - - return nil -} - -// parseHTPasswd parses the contents of htpasswd. This will read all the -// entries in the file, whether or not they are needed. An error is returned -// if any syntax errors are encountered or if the reader fails. -func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { - entries := map[string][]byte{} - scanner := bufio.NewScanner(rd) - var line int - for scanner.Scan() { - line++ // 1-based line numbering - t := strings.TrimSpace(scanner.Text()) - - if len(t) < 1 { - continue - } - - // lines that *begin* with a '#' are considered comments - if t[0] == '#' { - continue - } - - i := strings.Index(t, ":") - if i < 0 || i >= len(t) { - return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) - } - - entries[t[:i]] = []byte(t[i+1:]) - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - return entries, nil -} diff --git a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go b/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go deleted file mode 100644 index 309c359a..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package htpasswd - -import ( - "fmt" - "reflect" - "strings" - "testing" -) - -func TestParseHTPasswd(t *testing.T) { - - for _, tc := range []struct { - desc string - input string - err error - entries map[string][]byte - }{ - { - desc: "basic example", - input: ` -# This is a comment in a basic example. -bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= -frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W -MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 -DeokMan:공주님 -`, - entries: map[string][]byte{ - "bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="), - "frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"), - "MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"), - "DeokMan": []byte("공주님"), - }, - }, - { - desc: "ensures comments are filtered", - input: ` -# asdf:asdf -`, - }, - { - desc: "ensure midline hash is not comment", - input: ` -asdf:as#df -`, - entries: map[string][]byte{ - "asdf": []byte("as#df"), - }, - }, - { - desc: "ensure invalid entry is detected", - input: ` -# A valid comment -valid:entry -asdf -`, - err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`), - }, - } { - - entries, err := parseHTPasswd(strings.NewReader(tc.input)) - if err != tc.err { - if tc.err == nil { - t.Fatalf("%s: unexpected error: %v", tc.desc, err) - } else { - if err.Error() != tc.err.Error() { // use string equality here.
- t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) - } - } - } - - if tc.err != nil { - continue // don't test output - } - - // allow empty and nil to be equal - if tc.entries == nil { - tc.entries = map[string][]byte{} - } - - if !reflect.DeepEqual(entries, tc.entries) { - t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) - } - } - -} diff --git a/vendor/github.com/docker/distribution/registry/auth/silly/access.go b/vendor/github.com/docker/distribution/registry/auth/silly/access.go deleted file mode 100644 index 2b801d94..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/silly/access.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package silly provides a simple authentication scheme that checks for the -// existence of an Authorization header and issues access if is present and -// non-empty. -// -// This package is present as an example implementation of a minimal -// auth.AccessController and for testing. This is not suitable for any kind of -// production security. -package silly - -import ( - "fmt" - "net/http" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -// accessController provides a simple implementation of auth.AccessController -// that simply checks for a non-empty Authorization header. It is useful for -// demonstration and testing. -type accessController struct { - realm string - service string -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for silly access controller`) - } - - service, present := options["service"] - if _, ok := service.(string); !present || !ok { - return nil, fmt.Errorf(`"service" must be set for silly access controller`) - } - - return &accessController{realm: realm.(string), service: service.(string)}, nil -} - -// Authorized simply checks for the existence of the authorization header, -// responding with a bearer challenge if it doesn't exist. -func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - if req.Header.Get("Authorization") == "" { - challenge := challenge{ - realm: ac.realm, - service: ac.service, - } - - if len(accessRecords) > 0 { - var scopes []string - for _, access := range accessRecords { - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) - } - challenge.scope = strings.Join(scopes, " ") - } - - return nil, &challenge - } - - return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil -} - -type challenge struct { - realm string - service string - scope string -} - -var _ auth.Challenge = challenge{} - -// SetHeaders sets a simple bearer challenge on the response. -func (ch challenge) SetHeaders(w http.ResponseWriter) { - header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) - - if ch.scope != "" { - header = fmt.Sprintf("%s,scope=%q", header, ch.scope) - } - - w.Header().Set("WWW-Authenticate", header) -} - -func (ch challenge) Error() string { - return fmt.Sprintf("silly authentication challenge: %#v", ch) -} - -// init registers the silly auth backend. 
-func init() { - auth.Register("silly", auth.InitFunc(newAccessController)) -} diff --git a/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go b/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go deleted file mode 100644 index 8b5ecb80..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/silly/access_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package silly - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" -) - -func TestSillyAccessController(t *testing.T) { - ac := &accessController{ - realm: "test-realm", - service: "test-service", - } - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithValue(nil, "http.request", r) - authCtx, err := ac.Authorized(ctx) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - err.SetHeaders(w) - w.WriteHeader(http.StatusUnauthorized) - return - default: - t.Fatalf("unexpected error authorizing request: %v", err) - } - } - - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) - if !ok { - t.Fatal("silly accessController did not set auth.user context") - } - - if userInfo.Name != "silly" { - t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) - } - - w.WriteHeader(http.StatusNoContent) - })) - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) - } - - req, err := http.NewRequest("GET", server.URL, nil) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - req.Header.Set("Authorization", "seriously, anything") - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) - } -} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go deleted file mode 100644 index 5b1ff7ca..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/token/accesscontroller.go +++ /dev/null @@ -1,268 +0,0 @@ -package token - -import ( - "crypto" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" - "github.com/docker/libtrust" -) - -// accessSet maps a typed, named resource to -// a set of actions requested or authorized. -type accessSet map[auth.Resource]actionSet - -// newAccessSet constructs an accessSet from -// a variable number of auth.Access items. -func newAccessSet(accessItems ...auth.Access) accessSet { - accessSet := make(accessSet, len(accessItems)) - - for _, access := range accessItems { - resource := auth.Resource{ - Type: access.Type, - Name: access.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - set.add(access.Action) - } - - return accessSet -} - -// contains returns whether or not the given access is in this accessSet. 
-func (s accessSet) contains(access auth.Access) bool { - actionSet, ok := s[access.Resource] - if ok { - return actionSet.contains(access.Action) - } - - return false -} - -// scopeParam returns a collection of scopes which can -// be used for a WWW-Authenticate challenge parameter. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (s accessSet) scopeParam() string { - scopes := make([]string, 0, len(s)) - - for resource, actionSet := range s { - actions := strings.Join(actionSet.keys(), ",") - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) - } - - return strings.Join(scopes, " ") -} - -// Errors used and exported by this package. -var ( - ErrInsufficientScope = errors.New("insufficient scope") - ErrTokenRequired = errors.New("authorization token required") -) - -// authChallenge implements the auth.Challenge interface. -type authChallenge struct { - err error - realm string - service string - accessSet accessSet -} - -var _ auth.Challenge = authChallenge{} - -// Error returns the internal error string for this authChallenge. -func (ac authChallenge) Error() string { - return ac.err.Error() -} - -// Status returns the HTTP Response Status Code for this authChallenge. -func (ac authChallenge) Status() int { - return http.StatusUnauthorized -} - -// challengeParams constructs the value to be used in -// the WWW-Authenticate response challenge header. -// See https://tools.ietf.org/html/rfc6750#section-3 -func (ac authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) - - if scope := ac.accessSet.scopeParam(); scope != "" { - str = fmt.Sprintf("%s,scope=%q", str, scope) - } - - if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { - str = fmt.Sprintf("%s,error=%q", str, "invalid_token") - } else if ac.err == ErrInsufficientScope { - str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") - } - - return str -} - -// SetChallenge sets the WWW-Authenticate value for the response. -func (ac authChallenge) SetHeaders(w http.ResponseWriter) { - w.Header().Add("WWW-Authenticate", ac.challengeParams()) -} - -// accessController implements the auth.AccessController interface. -type accessController struct { - realm string - issuer string - service string - rootCerts *x509.CertPool - trustedKeys map[string]libtrust.PublicKey -} - -// tokenAccessOptions is a convenience type for handling -// options to the contstructor of an accessController. -type tokenAccessOptions struct { - realm string - issuer string - service string - rootCertBundle string -} - -// checkOptions gathers the necessary options -// for an accessController from the given map. -func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { - var opts tokenAccessOptions - - keys := []string{"realm", "issuer", "service", "rootcertbundle"} - vals := make([]string, 0, len(keys)) - for _, key := range keys { - val, ok := options[key].(string) - if !ok { - return opts, fmt.Errorf("token auth requires a valid option string: %q", key) - } - vals = append(vals, val) - } - - opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] - - return opts, nil -} - -// newAccessController creates an accessController using the given options. 
-func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - config, err := checkOptions(options) - if err != nil { - return nil, err - } - - fp, err := os.Open(config.rootCertBundle) - if err != nil { - return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - defer fp.Close() - - rawCertBundle, err := ioutil.ReadAll(fp) - if err != nil { - return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) - } - - var rootCerts []*x509.Certificate - pemBlock, rawCertBundle := pem.Decode(rawCertBundle) - for pemBlock != nil { - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) - } - - rootCerts = append(rootCerts, cert) - - pemBlock, rawCertBundle = pem.Decode(rawCertBundle) - } - - if len(rootCerts) == 0 { - return nil, errors.New("token auth requires at least one token signing root certificate") - } - - rootPool := x509.NewCertPool() - trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) - if err != nil { - return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) - } - trustedKeys[pubKey.KeyID()] = pubKey - } - - return &accessController{ - realm: config.realm, - issuer: config.issuer, - service: config.service, - rootCerts: rootPool, - trustedKeys: trustedKeys, - }, nil -} - -// Authorized handles checking whether the given request is authorized -// for actions on resources described by the given access items. -func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { - challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - accessSet: newAccessSet(accessItems...), - } - - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - parts := strings.Split(req.Header.Get("Authorization"), " ") - - if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { - challenge.err = ErrTokenRequired - return nil, challenge - } - - rawToken := parts[1] - - token, err := NewToken(rawToken) - if err != nil { - challenge.err = err - return nil, challenge - } - - verifyOpts := VerifyOptions{ - TrustedIssuers: []string{ac.issuer}, - AcceptedAudiences: []string{ac.service}, - Roots: ac.rootCerts, - TrustedKeys: ac.trustedKeys, - } - - if err = token.Verify(verifyOpts); err != nil { - challenge.err = err - return nil, challenge - } - - accessSet := token.accessSet() - for _, access := range accessItems { - if !accessSet.contains(access) { - challenge.err = ErrInsufficientScope - return nil, challenge - } - } - - return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil -} - -// init handles registering the token auth backend. -func init() { - auth.Register("token", auth.InitFunc(newAccessController)) -} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/stringset.go b/vendor/github.com/docker/distribution/registry/auth/token/stringset.go deleted file mode 100644 index 1d04f104..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/token/stringset.go +++ /dev/null @@ -1,35 +0,0 @@ -package token - -// StringSet is a useful type for looking up strings. 
-type stringSet map[string]struct{} - -// NewStringSet creates a new StringSet with the given strings. -func newStringSet(keys ...string) stringSet { - ss := make(stringSet, len(keys)) - ss.add(keys...) - return ss -} - -// Add inserts the given keys into this StringSet. -func (ss stringSet) add(keys ...string) { - for _, key := range keys { - ss[key] = struct{}{} - } -} - -// Contains returns whether the given key is in this StringSet. -func (ss stringSet) contains(key string) bool { - _, ok := ss[key] - return ok -} - -// Keys returns a slice of all keys in this StringSet. -func (ss stringSet) keys() []string { - keys := make([]string, 0, len(ss)) - - for key := range ss { - keys = append(keys, key) - } - - return keys -} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token.go b/vendor/github.com/docker/distribution/registry/auth/token/token.go deleted file mode 100644 index 166816ee..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/token/token.go +++ /dev/null @@ -1,343 +0,0 @@ -package token - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" - - "github.com/docker/distribution/registry/auth" -) - -const ( - // TokenSeparator is the value which separates the header, claims, and - // signature in the compact serialization of a JSON Web Token. - TokenSeparator = "." -) - -// Errors used by token parsing and verification. -var ( - ErrMalformedToken = errors.New("malformed token") - ErrInvalidToken = errors.New("invalid token") -) - -// ResourceActions stores allowed actions on a named and typed resource. -type ResourceActions struct { - Type string `json:"type"` - Name string `json:"name"` - Actions []string `json:"actions"` -} - -// ClaimSet describes the main section of a JSON Web Token. -type ClaimSet struct { - // Public claims - Issuer string `json:"iss"` - Subject string `json:"sub"` - Audience string `json:"aud"` - Expiration int64 `json:"exp"` - NotBefore int64 `json:"nbf"` - IssuedAt int64 `json:"iat"` - JWTID string `json:"jti"` - - // Private claims - Access []*ResourceActions `json:"access"` -} - -// Header describes the header section of a JSON Web Token. -type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - X5c []string `json:"x5c,omitempty"` - RawJWK json.RawMessage `json:"jwk,omitempty"` -} - -// Token describes a JSON Web Token. -type Token struct { - Raw string - Header *Header - Claims *ClaimSet - Signature []byte -} - -// VerifyOptions is used to specify -// options when verifying a JSON Web Token. -type VerifyOptions struct { - TrustedIssuers []string - AcceptedAudiences []string - Roots *x509.CertPool - TrustedKeys map[string]libtrust.PublicKey -} - -// NewToken parses the given raw token string -// and constructs an unverified JSON Web Token. 
-func NewToken(rawToken string) (*Token, error) { - parts := strings.Split(rawToken, TokenSeparator) - if len(parts) != 3 { - return nil, ErrMalformedToken - } - - var ( - rawHeader, rawClaims = parts[0], parts[1] - headerJSON, claimsJSON []byte - err error - ) - - defer func() { - if err != nil { - log.Errorf("error while unmarshalling raw token: %s", err) - } - }() - - if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { - err = fmt.Errorf("unable to decode header: %s", err) - return nil, ErrMalformedToken - } - - if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { - err = fmt.Errorf("unable to decode claims: %s", err) - return nil, ErrMalformedToken - } - - token := new(Token) - token.Header = new(Header) - token.Claims = new(ClaimSet) - - token.Raw = strings.Join(parts[:2], TokenSeparator) - if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { - err = fmt.Errorf("unable to decode signature: %s", err) - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(headerJSON, token.Header); err != nil { - return nil, ErrMalformedToken - } - - if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { - return nil, ErrMalformedToken - } - - return token, nil -} - -// Verify attempts to verify this token using the given options. -// Returns a nil error if the token is valid. -func (t *Token) Verify(verifyOpts VerifyOptions) error { - // Verify that the Issuer claim is a trusted authority. - if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { - log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) - return ErrInvalidToken - } - - // Verify that the Audience claim is allowed. - if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { - log.Errorf("token intended for another audience: %q", t.Claims.Audience) - return ErrInvalidToken - } - - // Verify that the token is currently usable and not expired. - currentUnixTime := time.Now().Unix() - if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { - log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) - return ErrInvalidToken - } - - // Verify the token signature. - if len(t.Signature) == 0 { - log.Error("token has no signature") - return ErrInvalidToken - } - - // Verify that the signing key is trusted. - signingKey, err := t.VerifySigningKey(verifyOpts) - if err != nil { - log.Error(err) - return ErrInvalidToken - } - - // Finally, verify the signature of the token using the key which signed it. - if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { - log.Errorf("unable to verify token signature: %s", err) - return ErrInvalidToken - } - - return nil -} - -// VerifySigningKey attempts to get the key which was used to sign this token. -// The token header should contain either of these 3 fields: -// `x5c` - The x509 certificate chain for the signing key. Needs to be -// verified. -// `jwk` - The JSON Web Key representation of the signing key. -// May contain its own `x5c` field which needs to be verified. -// `kid` - The unique identifier for the key. This library interprets it -// as a libtrust fingerprint. The key itself can be looked up in -// the trustedKeys field of the given verify options. -// Each of these methods are tried in that order of preference until the -// signing key is found or an error is returned. 
-func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { - // First attempt to get an x509 certificate chain from the header. - var ( - x5c = t.Header.X5c - rawJWK = t.Header.RawJWK - keyID = t.Header.KeyID - ) - - switch { - case len(x5c) > 0: - signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) - case len(rawJWK) > 0: - signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) - case len(keyID) > 0: - signingKey = verifyOpts.TrustedKeys[keyID] - if signingKey == nil { - err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) - } - default: - err = errors.New("unable to get token signing key") - } - - return -} - -func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { - if len(x5c) == 0 { - return nil, errors.New("empty x509 certificate chain") - } - - // Ensure the first element is encoded correctly. - leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) - if err != nil { - return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) - } - - // And that it is a valid x509 certificate. - leafCert, err := x509.ParseCertificate(leafCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) - } - - // The rest of the certificate chain are intermediate certificates. - intermediates := x509.NewCertPool() - for i := 1; i < len(x5c); i++ { - intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) - } - - intermediateCert, err := x509.ParseCertificate(intermediateCertDer) - if err != nil { - return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) - } - - intermediates.AddCert(intermediateCert) - } - - verifyOpts := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: roots, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - } - - // TODO: this call returns certificate chains which we ignore for now, but - // we should check them for revocations if we have the ability later. - if _, err = leafCert.Verify(verifyOpts); err != nil { - return nil, fmt.Errorf("unable to verify certificate chain: %s", err) - } - - // Get the public key from the leaf certificate. - leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) - if !ok { - return nil, errors.New("unable to get leaf cert public key value") - } - - leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) - if err != nil { - return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) - } - - return -} - -func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { - pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) - if err != nil { - return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) - } - - // Check to see if the key includes a certificate chain. - x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) - if !ok { - // The JWK should be one of the trusted root keys. - if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { - return nil, errors.New("untrusted JWK with no certificate chain") - } - - // The JWK is one of the trusted keys. - return - } - - // Ensure each item in the chain is of the correct type. 
- x5c := make([]string, len(x5cVal)) - for i, val := range x5cVal { - certString, ok := val.(string) - if !ok || len(certString) == 0 { - return nil, errors.New("malformed certificate chain") - } - x5c[i] = certString - } - - // Ensure that the x509 certificate chain can - // be verified up to one of our trusted roots. - leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) - if err != nil { - return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) - } - - // Verify that the public key in the leaf cert *is* the signing key. - if pubKey.KeyID() != leafKey.KeyID() { - return nil, errors.New("leaf certificate public key ID does not match JWK key ID") - } - - return -} - -// accessSet returns a set of actions available for the resource -// actions listed in the `access` section of this token. -func (t *Token) accessSet() accessSet { - if t.Claims == nil { - return nil - } - - accessSet := make(accessSet, len(t.Claims.Access)) - - for _, resourceActions := range t.Claims.Access { - resource := auth.Resource{ - Type: resourceActions.Type, - Name: resourceActions.Name, - } - - set, exists := accessSet[resource] - if !exists { - set = newActionSet() - accessSet[resource] = set - } - - for _, action := range resourceActions.Actions { - set.add(action) - } - } - - return accessSet -} - -func (t *Token) compactRaw() string { - return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) -} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/token_test.go b/vendor/github.com/docker/distribution/registry/auth/token/token_test.go deleted file mode 100644 index 9d84d4ef..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/token/token_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package token - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/docker/distribution/registry/auth" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { - keys := make([]libtrust.PrivateKey, 0, numKeys) - - for i := 0; i < numKeys; i++ { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err - } - keys = append(keys, key) - } - - return keys, nil -} - -func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { - if depth == 0 { - // Don't need to build a chain. 
- return rootKey, nil - } - - var ( - x5c = make([]string, depth) - parentKey = rootKey - key libtrust.PrivateKey - cert *x509.Certificate - err error - ) - - for depth > 0 { - if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { - return nil, err - } - - if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { - return nil, err - } - - depth-- - x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) - parentKey = key - } - - key.AddExtendedField("x5c", x5c) - - return key, nil -} - -func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { - certs := make([]*x509.Certificate, 0, len(rootKeys)) - - for _, key := range rootKeys { - cert, err := libtrust.GenerateCACert(key, key) - if err != nil { - return nil, err - } - certs = append(certs, cert) - } - - return certs, nil -} - -func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { - trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) - - for _, key := range rootKeys { - trustedKeys[key.KeyID()] = key.PublicKey() - } - - return trustedKeys -} - -func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { - signingKey, err := makeSigningKeyWithChain(rootKey, depth) - if err != nil { - return nil, fmt.Errorf("unable to make signing key with chain: %s", err) - } - - rawJWK, err := signingKey.PublicKey().MarshalJSON() - if err != nil { - return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) - } - - joseHeader := &Header{ - Type: "JWT", - SigningAlg: "ES256", - RawJWK: json.RawMessage(rawJWK), - } - - now := time.Now() - - randomBytes := make([]byte, 15) - if _, err = rand.Read(randomBytes); err != nil { - return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) - } - - claimSet := &ClaimSet{ - Issuer: issuer, - Subject: "foo", - Audience: audience, - Expiration: now.Add(5 * time.Minute).Unix(), - NotBefore: now.Unix(), - IssuedAt: now.Unix(), - JWTID: base64.URLEncoding.EncodeToString(randomBytes), - Access: access, - } - - var joseHeaderBytes, claimSetBytes []byte - - if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { - return nil, fmt.Errorf("unable to marshal jose header: %s", err) - } - if claimSetBytes, err = json.Marshal(claimSet); err != nil { - return nil, fmt.Errorf("unable to marshal claim set: %s", err) - } - - encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) - encodedClaimSet := joseBase64UrlEncode(claimSetBytes) - encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) - - var signatureBytes []byte - if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { - return nil, fmt.Errorf("unable to sign jwt payload: %s", err) - } - - signature := joseBase64UrlEncode(signatureBytes) - tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) - - return NewToken(tokenString) -} - -// This test makes 4 tokens with a varying number of intermediate -// certificates ranging from no intermediate chain to a length of 3 -// intermediates.
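makeTestToken above assembles the compact JWS form by hand: base64url-encode the JOSE header and the claim set, join them with a dot, sign that string, and append the encoded signature. A minimal standalone sketch of just the encoding steps (all literal values are illustrative, not taken from the tests):

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// joseB64 mirrors joseBase64UrlEncode: base64url with the trailing '='
// padding stripped, per the JOSE spec.
func joseB64(b []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

func main() {
	header := []byte(`{"typ":"JWT","alg":"ES256"}`)
	claims := []byte(`{"iss":"test-issuer","sub":"foo","aud":"test-audience"}`)
	signingInput := fmt.Sprintf("%s.%s", joseB64(header), joseB64(claims))
	// A real token appends joseB64(sig), where sig is the ES256 signature
	// over signingInput; a fake placeholder stands in here.
	fmt.Println(signingInput + "." + joseB64([]byte("placeholder-signature")))
}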
-func TestTokenVerify(t *testing.T) { - var ( - numTokens = 4 - issuer = "test-issuer" - audience = "test-audience" - access = []*ResourceActions{ - { - Type: "repository", - Name: "foo/bar", - Actions: []string{"pull", "push"}, - }, - } - ) - - rootKeys, err := makeRootKeys(numTokens) - if err != nil { - t.Fatal(err) - } - - rootCerts, err := makeRootCerts(rootKeys) - if err != nil { - t.Fatal(err) - } - - rootPool := x509.NewCertPool() - for _, rootCert := range rootCerts { - rootPool.AddCert(rootCert) - } - - trustedKeys := makeTrustedKeyMap(rootKeys) - - tokens := make([]*Token, 0, numTokens) - - for i := 0; i < numTokens; i++ { - token, err := makeTestToken(issuer, audience, access, rootKeys[i], i) - if err != nil { - t.Fatal(err) - } - tokens = append(tokens, token) - } - - verifyOps := VerifyOptions{ - TrustedIssuers: []string{issuer}, - AcceptedAudiences: []string{audience}, - Roots: rootPool, - TrustedKeys: trustedKeys, - } - - for _, token := range tokens { - if err := token.Verify(verifyOps); err != nil { - t.Fatal(err) - } - } -} - -func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { - rootCerts, err := makeRootCerts(rootKeys) - if err != nil { - return "", err - } - - tempFile, err := ioutil.TempFile("", "rootCertBundle") - if err != nil { - return "", err - } - defer tempFile.Close() - - for _, cert := range rootCerts { - if err = pem.Encode(tempFile, &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }); err != nil { - os.Remove(tempFile.Name()) - return "", err - } - } - - return tempFile.Name(), nil -} - -// TestAccessController tests complete integration of the token auth package. -// It starts by mocking the options for a token auth accessController which -// it creates. It then tries a few mock requests: -// - don't supply a token; should error with challenge -// - supply an invalid token; should error with challenge -// - supply a token with insufficient access; should error with challenge -// - supply a valid token; should not error -func TestAccessController(t *testing.T) { - // Make 2 keys; only the first is to be a trusted root key. - rootKeys, err := makeRootKeys(2) - if err != nil { - t.Fatal(err) - } - - rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) - if err != nil { - t.Fatal(err) - } - defer os.Remove(rootCertBundleFilename) - - realm := "https://auth.example.com/token/" - issuer := "test-issuer.example.com" - service := "test-service.example.com" - - options := map[string]interface{}{ - "realm": realm, - "issuer": issuer, - "service": service, - "rootcertbundle": rootCertBundleFilename, - } - - accessController, err := newAccessController(options) - if err != nil { - t.Fatal(err) - } - - // 1. Make a mock http.Request with no token. - req, err := http.NewRequest("GET", "http://example.com/foo", nil) - if err != nil { - t.Fatal(err) - } - - testAccess := auth.Access{ - Resource: auth.Resource{ - Type: "foo", - Name: "bar", - }, - Action: "baz", - } - - ctx := context.WithValue(nil, "http.request", req) - authCtx, err := accessController.Authorized(ctx, testAccess) - challenge, ok := err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrTokenRequired.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 2. Supply an invalid token.
- token, err := makeTestToken( - issuer, service, - []*ResourceActions{{ - Type: testAccess.Type, - Name: testAccess.Name, - Actions: []string{testAccess.Action}, - }}, - rootKeys[1], 1, // Everything is valid except the key which signed it. - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - challenge, ok = err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrInvalidToken.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInvalidToken) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 3. Supply a token with insufficient access. - token, err = makeTestToken( - issuer, service, - []*ResourceActions{}, // No access specified. - rootKeys[0], 1, - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - challenge, ok = err.(auth.Challenge) - if !ok { - t.Fatal("accessController did not return a challenge") - } - - if challenge.Error() != ErrInsufficientScope.Error() { - t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) - } - - if authCtx != nil { - t.Fatalf("expected nil auth context but got %s", authCtx) - } - - // 4. Supply the token we need, or deserve, or whatever. - token, err = makeTestToken( - issuer, service, - []*ResourceActions{{ - Type: testAccess.Type, - Name: testAccess.Name, - Actions: []string{testAccess.Action}, - }}, - rootKeys[0], 1, - ) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) - - authCtx, err = accessController.Authorized(ctx, testAccess) - if err != nil { - t.Fatalf("accessController returned unexpected error: %s", err) - } - - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) - if !ok { - t.Fatal("token accessController did not set auth.user context") - } - - if userInfo.Name != "foo" { - t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) - } -} diff --git a/vendor/github.com/docker/distribution/registry/auth/token/util.go b/vendor/github.com/docker/distribution/registry/auth/token/util.go deleted file mode 100644 index d7f95be4..00000000 --- a/vendor/github.com/docker/distribution/registry/auth/token/util.go +++ /dev/null @@ -1,58 +0,0 @@ -package token - -import ( - "encoding/base64" - "errors" - "strings" -) - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification.
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -// actionSet is a special type of stringSet. -type actionSet struct { - stringSet -} - -func newActionSet(actions ...string) actionSet { - return actionSet{newStringSet(actions...)} -} - -// contains calls stringSet.contains() for -// either "*" or the given action string. -func (s actionSet) contains(action string) bool { - return s.stringSet.contains("*") || s.stringSet.contains(action) -} - -// contains returns true if q is found in ss. -func contains(ss []string, q string) bool { - for _, s := range ss { - if s == q { - return true - } - } - - return false -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go index 27a2aa71..6c92fc34 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/transport" ) @@ -85,11 +86,24 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { return nil } +// This is the minimum duration a token can last (in seconds). +// A token must not live less than 60 seconds because older versions +// of the Docker client didn't read their expiration from the token +// response and assumed 60 seconds. So to remain compatible with +// those implementations, a token must live at least this long. +const minimumTokenLifetimeSeconds = 60 + +// Private interface for time used by this package to enable tests to provide their own implementation. +type clock interface { + Now() time.Time +} + type tokenHandler struct { header http.Header creds CredentialStore scope tokenScope transport http.RoundTripper + clock clock tokenLock sync.Mutex tokenCache string @@ -108,12 +122,24 @@ func (ts tokenScope) String() string { return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) } +// An implementation of clock for providing real time data. +type realClock struct{} + +// Now implements clock +func (realClock) Now() time.Time { return time.Now() } + // NewTokenHandler creates a new AuthenticationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { + return newTokenHandler(transport, creds, realClock{}, scope, actions...) +} + +// newTokenHandler exposes the option to provide a clock to manipulate time in unit testing.
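For example, a test can satisfy this private clock interface with a controllable implementation and hand it straight to newTokenHandler; a minimal sketch (session_test.go below defines an equivalent fakeClock):

// fakeClock satisfies the package-private clock interface so tests can
// advance time deterministically instead of sleeping.
type fakeClock struct{ current time.Time }

func (fc *fakeClock) Now() time.Time { return fc.current }

// Usage sketch: build a handler around the fake clock, then jump past the
// token's expires_in window to force a second token exchange.
//   clock := &fakeClock{current: time.Now()}
//   handler := newTokenHandler(nil, creds, clock, "some/repo", "pull")
//   clock.current = clock.current.Add(time.Hour)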
+func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { return &tokenHandler{ transport: transport, creds: creds, + clock: c, scope: tokenScope{ Resource: "repository", Scope: scope, @@ -146,40 +172,43 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st func (th *tokenHandler) refreshToken(params map[string]string) error { th.tokenLock.Lock() defer th.tokenLock.Unlock() - now := time.Now() + now := th.clock.Now() if now.After(th.tokenExpiration) { - token, err := th.fetchToken(params) + tr, err := th.fetchToken(params) if err != nil { return err } - th.tokenCache = token - th.tokenExpiration = now.Add(time.Minute) + th.tokenCache = tr.Token + th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) } return nil } type tokenResponse struct { - Token string `json:"token"` + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` } -func (th *tokenHandler) fetchToken(params map[string]string) (token string, err error) { +func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) realm, ok := params["realm"] if !ok { - return "", errors.New("no realm specified for token auth challenge") + return nil, errors.New("no realm specified for token auth challenge") } // TODO(dmcgowan): Handle empty scheme realmURL, err := url.Parse(realm) if err != nil { - return "", fmt.Errorf("invalid token auth challenge realm: %s", err) + return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) } req, err := http.NewRequest("GET", realmURL.String(), nil) if err != nil { - return "", err + return nil, err } reqParams := req.URL.Query() @@ -206,26 +235,44 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token string, err resp, err := th.client().Do(req) if err != nil { - return "", err + return nil, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { - return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) } decoder := json.NewDecoder(resp.Body) tr := new(tokenResponse) if err = decoder.Decode(tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) + return nil, fmt.Errorf("unable to decode token response: %s", err) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken } if tr.Token == "" { - return "", errors.New("authorization server did not include a token in the response") + return nil, errors.New("authorization server did not include a token in the response") } - return tr.Token, nil + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response.
+ tr.IssuedAt = th.clock.Now() + } + + return tr, nil } type basicHandler struct { diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go index 1b4754ab..f1686942 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session_test.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session_test.go @@ -7,11 +7,20 @@ import ( "net/http/httptest" "net/url" "testing" + "time" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/testutil" ) +// An implementation of clock for providing fake time data. +type fakeClock struct { + current time.Time +} + +// Now implements clock +func (fc *fakeClock) Now() time.Time { return fc.current } + func testServer(rrm testutil.RequestResponseMap) (string, func()) { h := testutil.NewHandler(rrm) s := httptest.NewServer(h) @@ -210,7 +219,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { }, Response: testutil.Response{ StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken"}`), + Body: []byte(`{"access_token":"statictoken"}`), }, }, }) @@ -265,6 +274,285 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { } } +func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, 
bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + clock := &fakeClock{current: time.Now()} + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. + timeIncrement := 1000 * time.Second + for i := 0; i < 4; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + // This test sets things up such that the token was issued one increment + // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. + // This will mean that the token expires after 3 increments instead of 4. 
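The expiry both tests exercise is just the issued_at/expires_in arithmetic from fetchToken above; a standalone sketch of that reduction (the helper name is invented for illustration):

// expiresAt sketches how refreshToken derives its cache deadline:
// issued_at plus expires_in seconds, with expires_in clamped to the
// 60-second floor and a missing issued_at defaulting to "now".
func expiresAt(issuedAt time.Time, expiresIn int, now time.Time) time.Time {
	if expiresIn < minimumTokenLifetimeSeconds {
		expiresIn = minimumTokenLifetimeSeconds
	}
	if issuedAt.IsZero() {
		issuedAt = now
	}
	return issuedAt.Add(time.Duration(expiresIn) * time.Second)
}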
+ clock := &fakeClock{current: time.Now()} + timeIncrement := 1000 * time.Second + firstIssuedAt := clock.Now() + clock.current = clock.current.Add(timeIncrement) + secondIssuedAt := clock.current.Add(2 * timeIncrement) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. + // We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn + // so this loop should have one fewer iteration. 
+ for i := 0; i < 3; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + func TestEndpointAuthorizeBasic(t *testing.T) { m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go index ebd1c36c..7305c021 100644 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -8,7 +8,6 @@ import ( "net/http" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" ) // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is @@ -52,7 +51,7 @@ func handleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { - return v2.ErrorCodeUnauthorized.WithDetail(uErr.Response) + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index c1e8e07f..fc709ded 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -14,7 +14,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" @@ -96,9 +97,9 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri return numFilled, returnErr } -// NewRepository creates a new Repository for the given repository name and base URL +// NewRepository creates a new Repository for the given repository name and base URL. 
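Repository-name validation now goes through the reference package rather than v2.ValidateRepositoryName, as the function below shows; a small runnable sketch of the same check (example names invented):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Well-formed per the reference grammar.
	if _, err := reference.ParseNamed("test.example.com/some/repo"); err != nil {
		fmt.Println("unexpected:", err)
	}
	// Uppercase path components violate the grammar and are rejected.
	if _, err := reference.ParseNamed("Test/Repo"); err != nil {
		fmt.Println("rejected:", err)
	}
}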
func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if err := v2.ValidateRepositoryName(name); err != nil { + if _, err := reference.ParseNamed(name); err != nil { return nil, err } @@ -211,8 +212,6 @@ func (ms *manifests) Tags() ([]string, error) { } return tagsResponse.Tags, nil - } else if resp.StatusCode == http.StatusNotFound { - return nil, nil } return nil, handleErrorResponse(resp) } @@ -242,7 +241,7 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { return false, handleErrorResponse(resp) } -func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { +func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { // Call by Tag endpoint since the API uses the same // URL endpoint for tags and digests. return ms.GetByTag(dgst.String()) @@ -262,7 +261,7 @@ func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { } } -func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { +func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { for _, option := range options { err := option(ms) if err != nil { @@ -288,9 +287,9 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic } defer resp.Body.Close() if resp.StatusCode == http.StatusNotModified { - return nil, nil + return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { - var sm manifest.SignedManifest + var sm schema1.SignedManifest decoder := json.NewDecoder(resp.Body) if err := decoder.Decode(&sm); err != nil { @@ -301,7 +300,7 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic return nil, handleErrorResponse(resp) } -func (ms *manifests) Put(m *manifest.SignedManifest) error { +func (ms *manifests) Put(m *schema1.SignedManifest) error { manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) if err != nil { return err @@ -358,25 +357,18 @@ type blobs struct { distribution.BlobDeleter } -func sanitizeLocation(location, source string) (string, error) { +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + locationURL, err := url.Parse(location) if err != nil { return "", err } - if locationURL.Scheme == "" { - sourceURL, err := url.Parse(source) - if err != nil { - return "", err - } - locationURL = &url.URL{ - Scheme: sourceURL.Scheme, - Host: sourceURL.Host, - Path: location, - } - location = locationURL.String() - } - return location, nil + return baseURL.ResolveReference(locationURL).String(), nil } func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { diff --git a/vendor/github.com/docker/distribution/registry/client/repository_test.go b/vendor/github.com/docker/distribution/registry/client/repository_test.go index 26201763..1e6eb25f 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository_test.go +++ b/vendor/github.com/docker/distribution/registry/client/repository_test.go @@ -3,7 +3,6 @@ package client import ( "bytes" "crypto/rand" - "encoding/json" "fmt" "io" "log" @@ -14,15 +13,15 @@ import ( "testing" "time" - "github.com/docker/distribution/uuid" - "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + 
"github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/testutil" + "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" ) func testServer(rrm testutil.RequestResponseMap) (string, func()) { @@ -420,41 +419,49 @@ func TestBlobUploadMonolithic(t *testing.T) { } } -func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { - blobs := make([]manifest.FSLayer, blobCount) - history := make([]manifest.History, blobCount) +func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { + blobs := make([]schema1.FSLayer, blobCount) + history := make([]schema1.History, blobCount) for i := 0; i < blobCount; i++ { dgst, blob := newRandomBlob((i % 5) * 16) - blobs[i] = manifest.FSLayer{BlobSum: dgst} - history[i] = manifest.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + blobs[i] = schema1.FSLayer{BlobSum: dgst} + history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} } - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, + m := schema1.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, }, } - manifestBytes, err := json.Marshal(m) - if err != nil { - panic(err) - } - dgst, err := digest.FromBytes(manifestBytes) + + pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { panic(err) } - m.Raw = manifestBytes + sm, err := schema1.Sign(&m, pk) + if err != nil { + panic(err) + } - return m, dgst + p, err := sm.Payload() + if err != nil { + panic(err) + } + + dgst, err := digest.FromBytes(p) + if err != nil { + panic(err) + } + + return sm, dgst, p } func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { @@ -522,7 +529,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request } -func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { +func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { if m1.Name != m2.Name { return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) } @@ -551,7 +558,7 @@ func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { func TestManifestFetch(t *testing.T) { ctx := context.Background() repo := "test.example.com/repo" - m1, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap addTestManifest(repo, dgst.String(), m1.Raw, &m) @@ -586,9 +593,9 @@ func TestManifestFetch(t *testing.T) { func TestManifestFetchWithEtag(t *testing.T) { repo := "test.example.com/repo/by/tag" - m1, d1 := newRandomSchemaV1Manifest(repo, "latest", 6) + _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addTestManifestWithEtag(repo, "latest", m1.Raw, &m, d1.String()) + addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) e, c := testServer(m) defer c() @@ -603,19 +610,16 @@ func TestManifestFetchWithEtag(t *testing.T) { t.Fatal(err) } - m2, err := ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) - if err != nil { + _, err = ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) + 
if err != distribution.ErrManifestNotModified { t.Fatal(err) } - if m2 != nil { - t.Fatal("Expected empty manifest for matching etag") - } } func TestManifestDelete(t *testing.T) { repo := "test.example.com/repo/delete" - _, dgst1 := newRandomSchemaV1Manifest(repo, "latest", 6) - _, dgst2 := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -654,7 +658,7 @@ func TestManifestDelete(t *testing.T) { func TestManifestPut(t *testing.T) { repo := "test.example.com/repo/delete" - m1, dgst := newRandomSchemaV1Manifest(repo, "other", 6) + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -747,7 +751,7 @@ func TestManifestTags(t *testing.T) { func TestManifestUnauthorized(t *testing.T) { repo := "test.example.com/repo" - _, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ @@ -782,10 +786,10 @@ func TestManifestUnauthorized(t *testing.T) { if !ok { t.Fatalf("Unexpected error type: %#v", err) } - if v2Err.Code != v2.ErrorCodeUnauthorized { + if v2Err.Code != errcode.ErrorCodeUnauthorized { t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) } - if expected := v2.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) } } @@ -857,3 +861,49 @@ func TestCatalogInParts(t *testing.T) { t.Fatalf("Got wrong number of repos") } } + +func TestSanitizeLocation(t *testing.T) { + for _, testcase := range []struct { + description string + location string + source string + expected string + err error + }{ + { + description: "ensure relative location correctly resolved", + location: "/v2/foo/baasdf", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf", + }, + { + description: "ensure parameters are preserved", + location: "/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + }, + { + description: "ensure new hostname overridden", + location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + source: "http://blahalaja.com/v1", + expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + }, + } { + fatalf := func(format string, args ...interface{}) { + t.Fatalf(testcase.description+": "+format, args...)
+ } + + s, err := sanitizeLocation(testcase.location, testcase.source) + if err != testcase.err { + if testcase.err != nil { + fatalf("expected error: %v != %v", err, testcase.err) + } else { + fatalf("unexpected error sanitizing: %v", err) + } + } + + if s != testcase.expected { + fatalf("bad sanitize: %q != %q", s, testcase.expected) + } + } +} diff --git a/vendor/github.com/docker/distribution/registry/doc.go b/vendor/github.com/docker/distribution/registry/doc.go index 1c01e42e..a1ba7f3a 100644 --- a/vendor/github.com/docker/distribution/registry/doc.go +++ b/vendor/github.com/docker/distribution/registry/doc.go @@ -1,3 +1,2 @@ -// Package registry is a placeholder package for registry interface -// definitions and utilities. +// Package registry provides the main entrypoints for running a registry. package registry diff --git a/vendor/github.com/docker/distribution/registry/handlers/api_test.go b/vendor/github.com/docker/distribution/registry/handlers/api_test.go deleted file mode 100644 index c484835f..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/api_test.go +++ /dev/null @@ -1,1380 +0,0 @@ -package handlers - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/http/httputil" - "net/url" - "os" - "path" - "reflect" - "regexp" - "strconv" - "strings" - "testing" - - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - _ "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" - "github.com/gorilla/handlers" - "golang.org/x/net/context" -) - -// TestCheckAPI hits the base endpoint (/v2/) and ensures we return the specified -// 200 OK response.
-func TestCheckAPI(t *testing.T) { - env := newTestEnv(t, false) - - baseURL, err := env.builder.BuildBaseURL() - if err != nil { - t.Fatalf("unexpected error building base url: %v", err) - } - - resp, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing api base check", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, - "Content-Length": []string{"2"}, - }) - - p, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unexpected error reading response body: %v", err) - } - - if string(p) != "{}" { - t.Fatalf("unexpected response body: %v", string(p)) - } -} - -// TestCatalogAPI tests the /v2/_catalog endpoint -func TestCatalogAPI(t *testing.T) { - chunkLen := 2 - env := newTestEnv(t, false) - - values := url.Values{ - "last": []string{""}, - "n": []string{strconv.Itoa(chunkLen)}} - - catalogURL, err := env.builder.BuildCatalogURL(values) - if err != nil { - t.Fatalf("unexpected error building catalog url: %v", err) - } - - // ----------------------------------- - // try to get an empty catalog - resp, err := http.Get(catalogURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing catalog api check", resp, http.StatusOK) - - var ctlg struct { - Repositories []string `json:"repositories"` - } - - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&ctlg); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - // we haven't pushed anything to the registry yet - if len(ctlg.Repositories) != 0 { - t.Fatalf("repositories has unexpected values") - } - - if resp.Header.Get("Link") != "" { - t.Fatalf("repositories has more data when none expected") - } - - // ----------------------------------- - // push something to the registry and try again - images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} - - for _, image := range images { - createRepository(env, t, image, "sometag") - } - - resp, err = http.Get(catalogURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing catalog api check", resp, http.StatusOK) - - dec = json.NewDecoder(resp.Body) - if err = dec.Decode(&ctlg); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if len(ctlg.Repositories) != chunkLen { - t.Fatalf("repositories has unexpected values") - } - - for _, image := range images[:chunkLen] { - if !contains(ctlg.Repositories, image) { - t.Fatalf("didn't find our repository '%s' in the catalog", image) - } - } - - link := resp.Header.Get("Link") - if link == "" { - t.Fatalf("repositories has less data than expected") - } - - newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) - - // ----------------------------------- - // get the last chunk of data - - catalogURL, err = env.builder.BuildCatalogURL(newValues) - if err != nil { - t.Fatalf("unexpected error building catalog url: %v", err) - } - - resp, err = http.Get(catalogURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing catalog api check", resp, http.StatusOK) - - dec = json.NewDecoder(resp.Body) - if err = dec.Decode(&ctlg); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if len(ctlg.Repositories) != 1 { - t.Fatalf("repositories has 
unexpected values") - } - - lastImage := images[len(images)-1] - if !contains(ctlg.Repositories, lastImage) { - t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) - } - - link = resp.Header.Get("Link") - if link != "" { - t.Fatalf("catalog has unexpected data") - } -} - -func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { - re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") - matches := re.FindStringSubmatch(urlStr) - - if len(matches) != 2 { - t.Fatalf("Catalog link address response was incorrect") - } - linkURL, _ := url.Parse(matches[1]) - urlValues := linkURL.Query() - - if urlValues.Get("n") != strconv.Itoa(numEntries) { - t.Fatalf("Catalog link entry size is incorrect") - } - - if urlValues.Get("last") != last { - t.Fatal("Catalog link last entry is incorrect") - } - - return urlValues -} - -func contains(elems []string, e string) bool { - for _, elem := range elems { - if elem == e { - return true - } - } - return false -} - -func TestURLPrefix(t *testing.T) { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - }, - } - config.HTTP.Prefix = "/test/" - - env := newTestEnvWithConfig(t, &config) - - baseURL, err := env.builder.BuildBaseURL() - if err != nil { - t.Fatalf("unexpected error building base url: %v", err) - } - - parsed, _ := url.Parse(baseURL) - if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { - t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) - } - - resp, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "issuing api base check", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, - "Content-Length": []string{"2"}, - }) -} - -type blobArgs struct { - imageName string - layerFile io.ReadSeeker - layerDigest digest.Digest - tarSumStr string -} - -func makeBlobArgs(t *testing.T) blobArgs { - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random layer file: %v", err) - } - - layerDigest := digest.Digest(tarSumStr) - - args := blobArgs{ - imageName: "foo/bar", - layerFile: layerFile, - layerDigest: layerDigest, - tarSumStr: tarSumStr, - } - return args -} - -// TestBlobAPI conducts a full test of the blob api.
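The push helpers these tests lean on (startPushLayer, pushLayer) follow the V2 two-step blob upload: POST to the uploads endpoint to obtain a Location, then PUT the bytes to that location with a digest parameter. A hedged sketch of that flow in plain net/http (registry address and repository name are illustrative):

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:5000" // illustrative registry address
	blob := []byte("layer-bytes")
	sum := sha256.Sum256(blob)
	dgst := "sha256:" + hex.EncodeToString(sum[:])

	// Step 1: open an upload; the registry answers 202 Accepted with a
	// Location pointing at the upload session.
	resp, err := http.Post(base+"/v2/foo/bar/blobs/uploads/", "", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	location := resp.Header.Get("Location")

	// Step 2: complete the upload by PUTting the blob to that location with
	// the digest added as a query parameter. (A production client would also
	// resolve a relative Location against base, as sanitizeLocation in
	// repository.go does.)
	u, err := url.Parse(location)
	if err != nil {
		panic(err)
	}
	q := u.Query()
	q.Set("digest", dgst)
	u.RawQuery = q.Encode()

	req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(blob))
	req.Header.Set("Content-Type", "application/octet-stream")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 201 Created
}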
-func TestBlobAPI(t *testing.T) { - deleteEnabled := false - env := newTestEnv(t, deleteEnabled) - args := makeBlobArgs(t) - testBlobAPI(t, env, args) - - deleteEnabled = true - env = newTestEnv(t, deleteEnabled) - args = makeBlobArgs(t) - testBlobAPI(t, env, args) - -} - -func TestBlobDelete(t *testing.T) { - deleteEnabled := true - env := newTestEnv(t, deleteEnabled) - - args := makeBlobArgs(t) - env = testBlobAPI(t, env, args) - testBlobDelete(t, env, args) -} - -func TestBlobDeleteDisabled(t *testing.T) { - deleteEnabled := false - env := newTestEnv(t, deleteEnabled) - args := makeBlobArgs(t) - - imageName := args.imageName - layerDigest := args.layerDigest - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) - if err != nil { - t.Fatalf("error building url: %v", err) - } - - resp, err := httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting when disabled: %v", err) - } - - checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) -} - -func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { - // TODO(stevvooe): This test code is complete junk but it should cover the - // complete flow. This must be broken down and checked against the - // specification *before* we submit the final to docker core. - imageName := args.imageName - layerFile := args.layerFile - layerDigest := args.layerDigest - - // ----------------------------------- - // Test fetch for non-existent content - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) - if err != nil { - t.Fatalf("error building url: %v", err) - } - - resp, err := http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching non-existent layer: %v", err) - } - - checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) - - // ------------------------------------------ - // Test head request for non-existent content - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on non-existent layer: %v", err) - } - - checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) - - // ------------------------------------------ - // Start an upload, check the status then cancel - uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName) - - // A status check should work - resp, err = http.Get(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error getting upload status: %v", err) - } - checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) - checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Range": []string{"0-0"}, - "Docker-Upload-UUID": []string{uploadUUID}, - }) - - req, err := http.NewRequest("DELETE", uploadURLBase, nil) - if err != nil { - t.Fatalf("unexpected error creating delete request: %v", err) - } - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error sending delete request: %v", err) - } - - checkResponse(t, "deleting upload", resp, http.StatusNoContent) - - // A status check should result in 404 - resp, err = http.Get(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error getting upload status: %v", err) - } - checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) - - // ----------------------------------------- - // Do layer push with an empty body and different digest - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) - 
if err != nil { - t.Fatalf("unexpected error doing bad layer push: %v", err) - } - - checkResponse(t, "bad layer push", resp, http.StatusBadRequest) - checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) - - // ----------------------------------------- - // Do layer push with an empty body and correct digest - zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{})) - if err != nil { - t.Fatalf("unexpected error digesting empty buffer: %v", err) - } - - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) - - // ----------------------------------------- - // Do layer push with an empty body and correct digest - - // This is a valid but empty tarfile! - emptyTar := bytes.Repeat([]byte("\x00"), 1024) - emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar)) - if err != nil { - t.Fatalf("unexpected error digesting empty tar: %v", err) - } - - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) - - // ------------------------------------------ - // Now, actually do successful upload. - layerLength, _ := layerFile.Seek(0, os.SEEK_END) - layerFile.Seek(0, os.SEEK_SET) - - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - - // ------------------------------------------ - // Now, push just a chunk - layerFile.Seek(0, 0) - - canonicalDigester := digest.Canonical.New() - if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { - t.Fatalf("error copying to digest: %v", err) - } - canonicalDigest := canonicalDigester.Digest() - - layerFile.Seek(0, 0) - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) - uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) - finishUpload(t, env.builder, imageName, uploadURLBase, dgst) - - // ------------------------ - // Use a head request to see if the layer exists. - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on existing layer: %v", err) - } - - checkResponse(t, "checking head on existing layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - }) - - // ---------------- - // Fetch the layer! 
- resp, err = http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "fetching layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - }) - - // Verify the body - verifier, err := digest.NewDigestVerifier(layerDigest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - io.Copy(verifier, resp.Body) - - if !verifier.Verified() { - t.Fatalf("response body did not pass verification") - } - - // ---------------- - // Fetch the layer with an invalid digest - badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) - resp, err = http.Get(badURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) - - // Cache headers - resp, err = http.Get(layerURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "fetching layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, - "Cache-Control": []string{"max-age=31536000"}, - }) - - // Matching etag, gives 304 - etag := resp.Header.Get("Etag") - req, err = http.NewRequest("GET", layerURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) - - // Non-matching etag, gives 200 - req, err = http.NewRequest("GET", layerURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", "") - resp, err = http.DefaultClient.Do(req) - checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) - - // Missing tests: - // - Upload the same tarsum file under a different repository and - // ensure the content remains uncorrupted. - return env -} - -func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { - // Upload a layer - imageName := args.imageName - layerFile := args.layerFile - layerDigest := args.layerDigest - - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) - if err != nil { - t.Fatalf(err.Error()) - } - // --------------- - // Delete a layer - resp, err := httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting layer: %v", err) - } - - checkResponse(t, "deleting layer", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{"0"}, - }) - - // --------------- - // Try and get it back - // Use a head request to see if the layer exists.
- resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on existing layer: %v", err) - } - - checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) - - // Delete already deleted layer - resp, err = httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting layer: %v", err) - } - - checkResponse(t, "deleting layer", resp, http.StatusNotFound) - - // ---------------- - // Attempt to delete a layer with an invalid digest - badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) - resp, err = httpDelete(badURL) - if err != nil { - t.Fatalf("unexpected error fetching layer: %v", err) - } - - checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) - - // ---------------- - // Reupload previously deleted blob - layerFile.Seek(0, os.SEEK_SET) - - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - - layerFile.Seek(0, os.SEEK_SET) - canonicalDigester := digest.Canonical.New() - if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { - t.Fatalf("error copying to digest: %v", err) - } - canonicalDigest := canonicalDigester.Digest() - - // ------------------------ - // Use a head request to see if it exists - resp, err = http.Head(layerURL) - if err != nil { - t.Fatalf("unexpected error checking head on existing layer: %v", err) - } - - layerLength, _ := layerFile.Seek(0, os.SEEK_END) - checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{canonicalDigest.String()}, - }) -} - -func TestDeleteDisabled(t *testing.T) { - env := newTestEnv(t, false) - - imageName := "foo/bar" - // "build" our layer file - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random layer file: %v", err) - } - - layerDigest := digest.Digest(tarSumStr) - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) - if err != nil { - t.Fatalf("Error building blob URL") - } - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - - resp, err := httpDelete(layerURL) - if err != nil { - t.Fatalf("unexpected error deleting layer: %v", err) - } - - checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) -} - -func httpDelete(url string) (*http.Response, error) { - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return nil, err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - // defer resp.Body.Close() - return resp, err -} - -type manifestArgs struct { - imageName string - signedManifest *manifest.SignedManifest - dgst digest.Digest -} - -func makeManifestArgs(t *testing.T) manifestArgs { - args := manifestArgs{ - imageName: "foo/bar", - } - - return args -} - -func TestManifestAPI(t *testing.T) { - deleteEnabled := false - env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - testManifestAPI(t, env, args) - - deleteEnabled = true - env = newTestEnv(t, deleteEnabled) - args = makeManifestArgs(t) - testManifestAPI(t, env, args) -} - -func TestManifestDelete(t *testing.T) { - deleteEnabled := true - env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - env, args = testManifestAPI(t, env, args) - 
testManifestDelete(t, env, args)
-}
-
-func TestManifestDeleteDisabled(t *testing.T) {
- deleteEnabled := false
- env := newTestEnv(t, deleteEnabled)
- args := makeManifestArgs(t)
- testManifestDeleteDisabled(t, env, args)
-}
-
-func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) *testEnv {
- imageName := args.imageName
- manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar)
- if err != nil {
- t.Fatalf("unexpected error getting manifest url: %v", err)
- }
-
- resp, err := httpDelete(manifestURL)
- if err != nil {
- t.Fatalf("unexpected error deleting manifest: %v", err)
- }
- defer resp.Body.Close()
-
- checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed)
- return nil
-}
-
-func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, manifestArgs) {
- imageName := args.imageName
- tag := "thetag"
-
- manifestURL, err := env.builder.BuildManifestURL(imageName, tag)
- if err != nil {
- t.Fatalf("unexpected error getting manifest url: %v", err)
- }
-
- // -----------------------------
- // Attempt to fetch the manifest
- resp, err := http.Get(manifestURL)
- if err != nil {
- t.Fatalf("unexpected error getting manifest: %v", err)
- }
- defer resp.Body.Close()
-
- checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound)
- checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown)
-
- tagsURL, err := env.builder.BuildTagsURL(imageName)
- if err != nil {
- t.Fatalf("unexpected error building tags url: %v", err)
- }
-
- resp, err = http.Get(tagsURL)
- if err != nil {
- t.Fatalf("unexpected error getting unknown tags: %v", err)
- }
- defer resp.Body.Close()
-
- // Check that we get an unknown repository error when asking for tags
- checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound)
- checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown)
-
- // --------------------------------
- // Attempt to push unsigned manifest with missing layers
- unsignedManifest := &manifest.Manifest{
- Versioned: manifest.Versioned{
- SchemaVersion: 1,
- },
- Name: imageName,
- Tag: tag,
- FSLayers: []manifest.FSLayer{
- {
- BlobSum: "asdf",
- },
- {
- BlobSum: "qwer",
- },
- },
- }
-
- resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest)
- defer resp.Body.Close()
- checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest)
- _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp,
- v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid)
-
- expectedCounts := map[errcode.ErrorCode]int{
- v2.ErrorCodeManifestUnverified: 1,
- v2.ErrorCodeBlobUnknown: 2,
- v2.ErrorCodeDigestInvalid: 2,
- }
-
- if !reflect.DeepEqual(counts, expectedCounts) {
- t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p))
- }
-
- // TODO(stevvooe): Add a test case where we take a mostly valid registry,
- // tamper with the content and ensure that we get an unverified manifest
- // error.
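- //
- // A hypothetical sketch of that case, reusing this file's helpers (the
- // flipped byte and the exact rejection surfaced are assumptions, not part
- // of the suite): once signedManifest is built below, corrupt a copy of
- // its payload and expect the PUT to be rejected:
- //
- //   tampered := *signedManifest
- //   tampered.Raw = append([]byte(nil), signedManifest.Raw...)
- //   tampered.Raw[len(tampered.Raw)/2] ^= 0xff // corrupt one payload byte
- //   resp := putManifest(t, "putting tampered manifest", manifestURL, &tampered)
- //   checkResponse(t, "putting tampered manifest", resp, http.StatusBadRequest)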
- - // Push 2 random layers - expectedLayers := make(map[digest.Digest]io.ReadSeeker) - - for i := range unsignedManifest.FSLayers { - rs, dgstStr, err := testutil.CreateRandomTarFile() - - if err != nil { - t.Fatalf("error creating random layer %d: %v", i, err) - } - dgst := digest.Digest(dgstStr) - - expectedLayers[dgst] = rs - unsignedManifest.FSLayers[i].BlobSum = dgst - - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) - } - - // ------------------- - // Push the signed manifest with all layers pushed. - signedManifest, err := manifest.Sign(unsignedManifest, env.pk) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - - payload, err := signedManifest.Payload() - checkErr(t, err, "getting manifest payload") - - dgst, err := digest.FromBytes(payload) - checkErr(t, err, "digesting manifest") - - args.signedManifest = signedManifest - args.dgst = dgst - - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) - checkErr(t, err, "building manifest url") - - resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // -------------------- - // Push by digest -- should get same result - resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // ------------------ - // Fetch by tag name - resp, err = http.Get(manifestURL) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifest manifest.SignedManifest - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) { - t.Fatalf("manifests do not match") - } - - // --------------- - // Fetch by digest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedManifestByDigest manifest.SignedManifest - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&fetchedManifestByDigest); err != nil { - t.Fatalf("error decoding fetched manifest: %v", err) - } - - if !bytes.Equal(fetchedManifestByDigest.Raw, signedManifest.Raw) { - t.Fatalf("manifests do not match") - } - - // Get by name with etag, gives 304 - etag := resp.Header.Get("Etag") - req, err := http.NewRequest("GET", manifestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching 
layer with etag", resp, http.StatusNotModified) - - // Get by digest with etag, gives 304 - req, err = http.NewRequest("GET", manifestDigestURL, nil) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - req.Header.Set("If-None-Match", etag) - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Error constructing request: %s", err) - } - - checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) - - // Ensure that the tag is listed. - resp, err = http.Get(tagsURL) - if err != nil { - t.Fatalf("unexpected error getting unknown tags: %v", err) - } - defer resp.Body.Close() - - // Check that we get an unknown repository error when asking for tags - checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) - dec = json.NewDecoder(resp.Body) - - var tagsResponse tagsAPIResponse - - if err := dec.Decode(&tagsResponse); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if tagsResponse.Name != imageName { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) - } - - if len(tagsResponse.Tags) != 1 { - t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) - } - - if tagsResponse.Tags[0] != tag { - t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) - } - - return env, args -} - -func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { - imageName := args.imageName - dgst := args.dgst - signedManifest := args.signedManifest - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) - // --------------- - // Delete by digest - resp, err := httpDelete(manifestDigestURL) - checkErr(t, err, "deleting manifest by digest") - - checkResponse(t, "deleting manifest", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{"0"}, - }) - - // --------------- - // Attempt to fetch deleted manifest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching deleted manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) - - // --------------- - // Delete already deleted manifest by digest - resp, err = httpDelete(manifestDigestURL) - checkErr(t, err, "re-deleting manifest by digest") - - checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) - - // -------------------- - // Re-upload manifest by digest - resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // --------------- - // Attempt to fetch re-uploaded deleted digest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching re-uploaded manifest by digest") - defer resp.Body.Close() - - checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // --------------- - // Attempt to delete an unknown manifest - unknownDigest := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - unknownManifestDigestURL, err := env.builder.BuildManifestURL(imageName, unknownDigest) - checkErr(t, err, "building unknown manifest url") - - resp, err = httpDelete(unknownManifestDigestURL) - checkErr(t, err, "delting unknown manifest by digest") - checkResponse(t, 
"fetching deleted manifest", resp, http.StatusNotFound) - -} - -type testEnv struct { - pk libtrust.PrivateKey - ctx context.Context - config configuration.Configuration - app *App - server *httptest.Server - builder *v2.URLBuilder -} - -func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - "delete": configuration.Parameters{"enabled": deleteEnabled}, - }, - } - - return newTestEnvWithConfig(t, &config) -} - -func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { - ctx := context.Background() - - app := NewApp(ctx, *config) - server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix) - - if err != nil { - t.Fatalf("error creating url builder: %v", err) - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) - } - - return &testEnv{ - pk: pk, - ctx: ctx, - config: *config, - app: app, - server: server, - builder: builder, - } -} - -func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { - var body []byte - if sm, ok := v.(*manifest.SignedManifest); ok { - body = sm.Raw - } else { - var err error - body, err = json.MarshalIndent(v, "", " ") - if err != nil { - t.Fatalf("unexpected error marshaling %v: %v", v, err) - } - } - - req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) - if err != nil { - t.Fatalf("error creating request for %s: %v", msg, err) - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("error doing put request while %s: %v", msg, err) - } - - return resp -} - -func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location string, uuid string) { - layerUploadURL, err := ub.BuildBlobUploadURL(name) - if err != nil { - t.Fatalf("unexpected error building layer upload url: %v", err) - } - - resp, err := http.Post(layerUploadURL, "", nil) - if err != nil { - t.Fatalf("unexpected error starting layer push: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name), resp, http.StatusAccepted) - - u, err := url.Parse(resp.Header.Get("Location")) - if err != nil { - t.Fatalf("error parsing location header: %v", err) - } - - uuid = path.Base(u.Path) - checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Content-Length": []string{"0"}, - "Docker-Upload-UUID": []string{uuid}, - }) - - return resp.Header.Get("Location"), uuid -} - -// doPushLayer pushes the layer content returning the url on success returning -// the response. If you're only expecting a successful response, use pushLayer. -func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { - u, err := url.Parse(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error parsing pushLayer url: %v", err) - } - - u.RawQuery = url.Values{ - "_state": u.Query()["_state"], - - "digest": []string{dgst.String()}, - }.Encode() - - uploadURL := u.String() - - // Just do a monolithic upload - req, err := http.NewRequest("PUT", uploadURL, body) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - - return http.DefaultClient.Do(req) -} - -// pushLayer pushes the layer content returning the url on success. 
-func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - digester := digest.Canonical.New() - - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) - if err != nil { - t.Fatalf("unexpected error doing push layer request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - - if err != nil { - t.Fatalf("error generating sha256 digest of body") - } - - sha256Dgst := digester.Digest() - - expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst) - if err != nil { - t.Fatalf("error building expected layer url: %v", err) - } - - checkHeaders(t, resp, http.Header{ - "Location": []string{expectedLayerURL}, - "Content-Length": []string{"0"}, - "Docker-Content-Digest": []string{sha256Dgst.String()}, - }) - - return resp.Header.Get("Location") -} - -func finishUpload(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, dgst digest.Digest) string { - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) - if err != nil { - t.Fatalf("unexpected error doing push layer request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - - expectedLayerURL, err := ub.BuildBlobURL(name, dgst) - if err != nil { - t.Fatalf("error building expected layer url: %v", err) - } - - checkHeaders(t, resp, http.Header{ - "Location": []string{expectedLayerURL}, - "Content-Length": []string{"0"}, - "Docker-Content-Digest": []string{dgst.String()}, - }) - - return resp.Header.Get("Location") -} - -func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { - u, err := url.Parse(uploadURLBase) - if err != nil { - t.Fatalf("unexpected error parsing pushLayer url: %v", err) - } - - u.RawQuery = url.Values{ - "_state": u.Query()["_state"], - }.Encode() - - uploadURL := u.String() - - digester := digest.Canonical.New() - - req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := http.DefaultClient.Do(req) - - return resp, digester.Digest(), err -} - -func pushChunk(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { - resp, dgst, err := doPushChunk(t, uploadURLBase, body) - if err != nil { - t.Fatalf("unexpected error doing push layer request: %v", err) - } - defer resp.Body.Close() - - checkResponse(t, "putting chunk", resp, http.StatusAccepted) - - if err != nil { - t.Fatalf("error generating sha256 digest of body") - } - - checkHeaders(t, resp, http.Header{ - "Range": []string{fmt.Sprintf("0-%d", length-1)}, - "Content-Length": []string{"0"}, - }) - - return resp.Header.Get("Location"), dgst -} - -func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { - if resp.StatusCode != expectedStatus { - t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) - maybeDumpResponse(t, resp) - - t.FailNow() - } -} - -// checkBodyHasErrorCodes ensures the body is an error body and has the -// expected error codes, returning the error structure, the json slice and a -// count of the errors by code. 
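- // A typical call site looks like (illustrative only):
- //
- //   _, body, counts := checkBodyHasErrorCodes(t, "bad push", resp, v2.ErrorCodeBlobUnknown)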
-func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) {
- p, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatalf("unexpected error reading body %s: %v", msg, err)
- }
-
- var errs errcode.Errors
- if err := json.Unmarshal(p, &errs); err != nil {
- t.Fatalf("unexpected error decoding error response: %v", err)
- }
-
- if len(errs) == 0 {
- t.Fatalf("expected errors in response")
- }
-
- // TODO(stevvooe): Shoot. The error setup is not working out. The content-
- // type headers are being set after writing the status code.
- // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" {
- // t.Fatalf("unexpected content type: %v != 'application/json'",
- // resp.Header.Get("Content-Type"))
- // }
-
- expected := map[errcode.ErrorCode]struct{}{}
- counts := map[errcode.ErrorCode]int{}
-
- // Initialize map with zeros for expected
- for _, code := range errorCodes {
- expected[code] = struct{}{}
- counts[code] = 0
- }
-
- for _, e := range errs {
- err, ok := e.(errcode.ErrorCoder)
- if !ok {
- t.Fatalf("not an ErrorCoder: %#v", e)
- }
- if _, ok := expected[err.ErrorCode()]; !ok {
- t.Fatalf("unexpected error code %v encountered during %s: %s", err.ErrorCode(), msg, string(p))
- }
- counts[err.ErrorCode()]++
- }
-
- // Ensure that counts of expected errors were all non-zero
- for code := range expected {
- if counts[code] == 0 {
- t.Fatalf("expected error code %v not encountered during %s: %s", code, msg, string(p))
- }
- }
-
- return errs, p, counts
-}
-
-func maybeDumpResponse(t *testing.T, resp *http.Response) {
- if d, err := httputil.DumpResponse(resp, true); err != nil {
- t.Logf("error dumping response: %v", err)
- } else {
- t.Logf("response:\n%s", string(d))
- }
-}
-
-// checkHeaders checks that the response has at least the given headers. If
-// not, the test will fail. If a passed in header value is "*", any non-zero
-// value will suffice as a match.
-func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
- for k, vs := range headers {
- if resp.Header.Get(k) == "" {
- t.Fatalf("response missing header %q", k)
- }
-
- for _, v := range vs {
- if v == "*" {
- // Just ensure there is some value.
- if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { - continue - } - } - - for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { - if hv != v { - t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) - } - } - } - } -} - -func checkErr(t *testing.T, err error, msg string) { - if err != nil { - t.Fatalf("unexpected error %s: %v", msg, err) - } -} - -func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { - unsignedManifest := &manifest.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: imageName, - Tag: tag, - FSLayers: []manifest.FSLayer{ - { - BlobSum: "asdf", - }, - { - BlobSum: "qwer", - }, - }, - } - - // Push 2 random layers - expectedLayers := make(map[digest.Digest]io.ReadSeeker) - - for i := range unsignedManifest.FSLayers { - rs, dgstStr, err := testutil.CreateRandomTarFile() - - if err != nil { - t.Fatalf("error creating random layer %d: %v", i, err) - } - dgst := digest.Digest(dgstStr) - - expectedLayers[dgst] = rs - unsignedManifest.FSLayers[i].BlobSum = dgst - - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) - } - - signedManifest, err := manifest.Sign(unsignedManifest, env.pk) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - - payload, err := signedManifest.Payload() - checkErr(t, err, "getting manifest payload") - - dgst, err := digest.FromBytes(payload) - checkErr(t, err, "digesting manifest") - - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) - checkErr(t, err, "building manifest url") - - resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/app.go b/vendor/github.com/docker/distribution/registry/handlers/app.go deleted file mode 100644 index f60290d0..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/app.go +++ /dev/null @@ -1,800 +0,0 @@ -package handlers - -import ( - cryptorand "crypto/rand" - "expvar" - "fmt" - "math/rand" - "net" - "net/http" - "os" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/notifications" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - registrymiddleware "github.com/docker/distribution/registry/middleware/registry" - repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" - "github.com/docker/distribution/registry/proxy" - "github.com/docker/distribution/registry/storage" - memorycache "github.com/docker/distribution/registry/storage/cache/memory" - rediscache "github.com/docker/distribution/registry/storage/cache/redis" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/factory" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "github.com/garyburd/redigo/redis" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// randomSecretSize is the number of 
random bytes to generate if no secret -// was specified. -const randomSecretSize = 32 - -// App is a global registry application object. Shared resources can be placed -// on this object that will be accessible from all requests. Any writable -// fields should be protected. -type App struct { - context.Context - - Config configuration.Configuration - - router *mux.Router // main application router, configured with dispatchers - driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry distribution.Namespace // registry is the primary registry backend for the app instance. - accessController auth.AccessController // main access controller for application - - // events contains notification related configuration. - events struct { - sink notifications.Sink - source notifications.SourceRecord - } - - redis *redis.Pool - - // true if this registry is configured as a pull through cache - isCache bool -} - -// NewApp takes a configuration and returns a configured app, ready to serve -// requests. The app only implements ServeHTTP and can be wrapped in other -// handlers accordingly. -func NewApp(ctx context.Context, configuration configuration.Configuration) *App { - app := &App{ - Config: configuration, - Context: ctx, - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), - isCache: configuration.Proxy.RemoteURL != "", - } - - app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) - - // Register the handler dispatchers. - app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { - return http.HandlerFunc(apiBase) - }) - app.register(v2.RouteNameManifest, imageManifestDispatcher) - app.register(v2.RouteNameCatalog, catalogDispatcher) - app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, blobDispatcher) - app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) - - var err error - app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) - if err != nil { - // TODO(stevvooe): Move the creation of a service into a protected - // method, where this is created lazily. Its status can be queried via - // a health check. 
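- // Until then, fail fast here: without a working storage driver the
- // registry cannot serve any request.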
- panic(err) - } - - purgeConfig := uploadPurgeDefaultConfig() - if mc, ok := configuration.Storage["maintenance"]; ok { - for k, v := range mc { - switch k { - case "uploadpurging": - purgeConfig = v.(map[interface{}]interface{}) - } - } - - } - - startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) - - app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) - if err != nil { - panic(err) - } - - app.configureSecret(&configuration) - app.configureEvents(&configuration) - app.configureRedis(&configuration) - app.configureLogHook(&configuration) - - // configure deletion - var deleteEnabled bool - if d, ok := configuration.Storage["delete"]; ok { - e, ok := d["enabled"] - if ok { - if deleteEnabled, ok = e.(bool); !ok { - deleteEnabled = false - } - } - } - - // configure redirects - var redirectDisabled bool - if redirectConfig, ok := configuration.Storage["redirect"]; ok { - v := redirectConfig["disable"] - switch v := v.(type) { - case bool: - redirectDisabled = v - default: - panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) - } - - if redirectDisabled { - ctxu.GetLogger(app).Infof("backend redirection disabled") - } - } - - // configure storage caches - if cc, ok := configuration.Storage["cache"]; ok { - v, ok := cc["blobdescriptor"] - if !ok { - // Backwards compatible: "layerinfo" == "blobdescriptor" - v = cc["layerinfo"] - } - - switch v { - case "redis": - if app.redis == nil { - panic("redis configuration required to use for layerinfo cache") - } - app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled, !redirectDisabled, app.isCache) - ctxu.GetLogger(app).Infof("using redis blob descriptor cache") - case "inmemory": - app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled, !redirectDisabled, app.isCache) - ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") - default: - if v != "" { - ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) - } - } - } - - if app.registry == nil { - // configure the registry if no cache section is available. - app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled, !redirectDisabled, app.isCache) - } - - app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) - if err != nil { - panic(err) - } - - authType := configuration.Auth.Type() - - if authType != "" { - accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) - if err != nil { - panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) - } - app.accessController = accessController - ctxu.GetLogger(app).Debugf("configured %q access controller", authType) - } - - // configure as a pull through cache - if configuration.Proxy.RemoteURL != "" { - app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) - if err != nil { - panic(err.Error()) - } - app.isCache = true - ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) - } - - return app -} - -// register a handler with the application, by route name. The handler will be -// passed through the application filters and context will be constructed at -// request time. 
-func (app *App) register(routeName string, dispatch dispatchFunc) { - - // TODO(stevvooe): This odd dispatcher/route registration is by-product of - // some limitations in the gorilla/mux router. We are using it to keep - // routing consistent between the client and server, but we may want to - // replace it with manual routing and structure-based dispatch for better - // control over the request execution. - - app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) -} - -// configureEvents prepares the event sink for action. -func (app *App) configureEvents(configuration *configuration.Configuration) { - // Configure all of the endpoint sinks. - var sinks []notifications.Sink - for _, endpoint := range configuration.Notifications.Endpoints { - if endpoint.Disabled { - ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) - continue - } - - ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) - endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ - Timeout: endpoint.Timeout, - Threshold: endpoint.Threshold, - Backoff: endpoint.Backoff, - Headers: endpoint.Headers, - }) - - sinks = append(sinks, endpoint) - } - - // NOTE(stevvooe): Moving to a new queueing implementation is as easy as - // replacing broadcaster with a rabbitmq implementation. It's recommended - // that the registry instances also act as the workers to keep deployment - // simple. - app.events.sink = notifications.NewBroadcaster(sinks...) - - // Populate registry event source - hostname, err := os.Hostname() - if err != nil { - hostname = configuration.HTTP.Addr - } else { - // try to pick the port off the config - _, port, err := net.SplitHostPort(configuration.HTTP.Addr) - if err == nil { - hostname = net.JoinHostPort(hostname, port) - } - } - - app.events.source = notifications.SourceRecord{ - Addr: hostname, - InstanceID: ctxu.GetStringValue(app, "instance.id"), - } -} - -func (app *App) configureRedis(configuration *configuration.Configuration) { - if configuration.Redis.Addr == "" { - ctxu.GetLogger(app).Infof("redis not configured") - return - } - - pool := &redis.Pool{ - Dial: func() (redis.Conn, error) { - // TODO(stevvooe): Yet another use case for contextual timing. 
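- // Stash the dial start time in the context so the done callback below
- // can log how long the connection took.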
- ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) - - done := func(err error) { - logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, "redis.connect.startedat")) - if err != nil { - logger.Errorf("redis: error connecting: %v", err) - } else { - logger.Infof("redis: connect %v", configuration.Redis.Addr) - } - } - - conn, err := redis.DialTimeout("tcp", - configuration.Redis.Addr, - configuration.Redis.DialTimeout, - configuration.Redis.ReadTimeout, - configuration.Redis.WriteTimeout) - if err != nil { - ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", - configuration.Redis.Addr, err) - done(err) - return nil, err - } - - // authorize the connection - if configuration.Redis.Password != "" { - if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - // select the database to use - if configuration.Redis.DB != 0 { - if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { - defer conn.Close() - done(err) - return nil, err - } - } - - done(nil) - return conn, nil - }, - MaxIdle: configuration.Redis.Pool.MaxIdle, - MaxActive: configuration.Redis.Pool.MaxActive, - IdleTimeout: configuration.Redis.Pool.IdleTimeout, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - // TODO(stevvooe): We can probably do something more interesting - // here with the health package. - _, err := c.Do("PING") - return err - }, - Wait: false, // if a connection is not avialable, proceed without cache. - } - - app.redis = pool - - // setup expvar - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { - return map[string]interface{}{ - "Config": configuration.Redis, - "Active": app.redis.ActiveCount(), - } - })) -} - -// configureLogHook prepares logging hook parameters. -func (app *App) configureLogHook(configuration *configuration.Configuration) { - entry, ok := ctxu.GetLogger(app).(*log.Entry) - if !ok { - // somehow, we are not using logrus - return - } - - logger := entry.Logger - - for _, configHook := range configuration.Log.Hooks { - if !configHook.Disabled { - switch configHook.Type { - case "mail": - hook := &logHook{} - hook.LevelsParam = configHook.Levels - hook.Mail = &mailer{ - Addr: configHook.MailOptions.SMTP.Addr, - Username: configHook.MailOptions.SMTP.Username, - Password: configHook.MailOptions.SMTP.Password, - Insecure: configHook.MailOptions.SMTP.Insecure, - From: configHook.MailOptions.From, - To: configHook.MailOptions.To, - } - logger.Hooks.Add(hook) - default: - } - } - } -} - -// configureSecret creates a random secret if a secret wasn't included in the -// configuration. -func (app *App) configureSecret(configuration *configuration.Configuration) { - if configuration.HTTP.Secret == "" { - var secretBytes [randomSecretSize]byte - if _, err := cryptorand.Read(secretBytes[:]); err != nil { - panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) - } - configuration.HTTP.Secret = string(secretBytes[:]) - ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. 
To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") - } -} - -func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() // ensure that request body is always closed. - - // Instantiate an http context here so we can track the error codes - // returned by the request router. - ctx := defaultContextManager.context(app, w, r) - - defer func() { - status, ok := ctx.Value("http.response.status").(int) - if ok && status >= 200 && status <= 399 { - ctxu.GetResponseLogger(ctx).Infof("response completed") - } - }() - defer defaultContextManager.release(ctx) - - // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. - var err error - w, err = ctxu.GetResponseWriter(ctx) - if err != nil { - ctxu.GetLogger(ctx).Warnf("response writer not found in context") - } - - // Set a header with the Docker Distribution API Version for all responses. - w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") - app.router.ServeHTTP(w, r) -} - -// dispatchFunc takes a context and request and returns a constructed handler -// for the route. The dispatcher will use this to dynamically create request -// specific handlers for each endpoint without creating a new router for each -// request. -type dispatchFunc func(ctx *Context, r *http.Request) http.Handler - -// TODO(stevvooe): dispatchers should probably have some validation error -// chain with proper error reporting. - -// dispatcher returns a handler that constructs a request specific context and -// handler, using the dispatch factory function. -func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - context := app.context(w, r) - - if err := app.authorized(w, r, context); err != nil { - ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) - return - } - - // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) - - if app.nameRequired(r) { - repository, err := app.registry.Repository(context, getName(context)) - - if err != nil { - ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) - - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) - case distribution.ErrRepositoryNameInvalid: - context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) - } - - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return - } - - // assign and decorate the authorized repository with an event bridge. - context.Repository = notifications.Listen( - repository, - app.eventBridge(context, r)) - - context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) - if err != nil { - ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return - } - } - - dispatch(context, r).ServeHTTP(w, r) - // Automated error response handling here. 
Handlers may return their - // own errors if they need different behavior (such as range errors - // for layer upload). - if context.Errors.Len() > 0 { - if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - - app.logError(context, context.Errors) - } - }) -} - -func (app *App) logError(context context.Context, errors errcode.Errors) { - for _, e1 := range errors { - var c ctxu.Context - - switch e1.(type) { - case errcode.Error: - e, _ := e1.(errcode.Error) - c = ctxu.WithValue(context, "err.code", e.Code) - c = ctxu.WithValue(c, "err.message", e.Code.Message()) - c = ctxu.WithValue(c, "err.detail", e.Detail) - case errcode.ErrorCode: - e, _ := e1.(errcode.ErrorCode) - c = ctxu.WithValue(context, "err.code", e) - c = ctxu.WithValue(c, "err.message", e.Message()) - default: - // just normal go 'error' - c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) - c = ctxu.WithValue(c, "err.message", e1.Error()) - } - - c = ctxu.WithLogger(c, ctxu.GetLogger(c, - "err.code", - "err.message", - "err.detail")) - ctxu.GetResponseLogger(c).Errorf("response completed with error") - } -} - -// context constructs the context object for the application. This only be -// called once per request. -func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := defaultContextManager.context(app, w, r) - ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, - "vars.name", - "vars.reference", - "vars.digest", - "vars.uuid")) - - context := &Context{ - App: app, - Context: ctx, - urlBuilder: v2.NewURLBuilderFromRequest(r), - } - - return context -} - -// authorized checks if the request can proceed with access to the requested -// repository. If it succeeds, the context may access the requested -// repository. An error will be returned if access is not available. -func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { - ctxu.GetLogger(context).Debug("authorizing request") - repo := getName(context) - - if app.accessController == nil { - return nil // access controller is not enabled. - } - - var accessRecords []auth.Access - - if repo != "" { - accessRecords = appendAccessRecords(accessRecords, r.Method, repo) - } else { - // Only allow the name not to be set on the base route. - if app.nameRequired(r) { - // For this to be properly secured, repo must always be set for a - // resource that may make a modification. The only condition under - // which name is not set and we still allow access is when the - // base route is accessed. This section prevents us from making - // that mistake elsewhere in the code, allowing any operation to - // proceed. - if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - return fmt.Errorf("forbidden: no repository name") - } - accessRecords = appendCatalogAccessRecord(accessRecords, r) - } - - ctx, err := app.accessController.Authorized(context.Context, accessRecords...) 
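- // On success, Authorized returns a context carrying the verified
- // credentials; on failure it returns an auth.Challenge or an opaque
- // error from the access controller backend, both handled below.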
- if err != nil { - switch err := err.(type) { - case auth.Challenge: - // Add the appropriate WWW-Auth header - err.SetHeaders(w) - - if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) - } - default: - // This condition is a potential security problem either in - // the configuration or whatever is backing the access - // controller. Just return a bad request with no information - // to avoid exposure. The request should not proceed. - ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) - w.WriteHeader(http.StatusBadRequest) - } - - return err - } - - // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context - // should be replaced by another, rather than replacing the context on a - // mutable object. - context.Context = ctx - return nil -} - -// eventBridge returns a bridge for the current request, configured with the -// correct actor and source. -func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { - actor := notifications.ActorRecord{ - Name: getUserName(ctx, r), - } - request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) - - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) -} - -// nameRequired returns true if the route requires a name. -func (app *App) nameRequired(r *http.Request) bool { - route := mux.CurrentRoute(r) - routeName := route.GetName() - return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) -} - -// apiBase implements a simple yes-man for doing overall checks against the -// api. This can support auth roundtrips to support docker login. -func apiBase(w http.ResponseWriter, r *http.Request) { - const emptyJSON = "{}" - // Provide a simple /v2/ 200 OK response with empty json response. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) - - fmt.Fprint(w, emptyJSON) -} - -// appendAccessRecords checks the method and adds the appropriate Access records to the records list. -func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { - resource := auth.Resource{ - Type: "repository", - Name: repo, - } - - switch method { - case "GET", "HEAD": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }) - case "POST", "PUT", "PATCH": - records = append(records, - auth.Access{ - Resource: resource, - Action: "pull", - }, - auth.Access{ - Resource: resource, - Action: "push", - }) - case "DELETE": - // DELETE access requires full admin rights, which is represented - // as "*". This may not be ideal. 
- records = append(records,
- auth.Access{
- Resource: resource,
- Action: "*",
- })
- }
- return records
-}
-
-// Add the access record for the catalog if it's our current route
-func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access {
- route := mux.CurrentRoute(r)
- routeName := route.GetName()
-
- if routeName == v2.RouteNameCatalog {
- resource := auth.Resource{
- Type: "registry",
- Name: "catalog",
- }
-
- accessRecords = append(accessRecords,
- auth.Access{
- Resource: resource,
- Action: "*",
- })
- }
- return accessRecords
-}
-
-// applyRegistryMiddleware wraps a registry instance with the configured middlewares
-func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) {
- for _, mw := range middlewares {
- rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry)
- if err != nil {
- return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err)
- }
- registry = rmw
- }
- return registry, nil
-}
-
-// applyRepoMiddleware wraps a repository with the configured middlewares
-func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) {
- for _, mw := range middlewares {
- rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository)
- if err != nil {
- return nil, err
- }
- repository = rmw
- }
- return repository, nil
-}
-
-// applyStorageMiddleware wraps a storage driver with the configured middlewares
-func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) {
- for _, mw := range middlewares {
- smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver)
- if err != nil {
- return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err)
- }
- driver = smw
- }
- return driver, nil
-}
-
-// uploadPurgeDefaultConfig provides a default configuration for upload
-// purging to be used in the absence of configuration in the
-// configuration file
-func uploadPurgeDefaultConfig() map[interface{}]interface{} {
- config := map[interface{}]interface{}{}
- config["enabled"] = true
- config["age"] = "168h"
- config["interval"] = "24h"
- config["dryrun"] = false
- return config
-}
-
-func badPurgeUploadConfig(reason string) {
- panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason))
-}
-
-// startUploadPurger schedules a goroutine which will periodically
-// check upload directories for old files and delete them
-func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) {
- if config["enabled"] == false {
- return
- }
-
- var purgeAgeDuration time.Duration
- var err error
- purgeAge, ok := config["age"]
- if ok {
- ageStr, ok := purgeAge.(string)
- if !ok {
- badPurgeUploadConfig("age is not a string")
- }
- purgeAgeDuration, err = time.ParseDuration(ageStr)
- if err != nil {
- badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error()))
- }
- } else {
- badPurgeUploadConfig("age missing")
- }
-
- var intervalDuration time.Duration
- interval, ok := config["interval"]
- if ok {
- intervalStr, ok := interval.(string)
- if !ok {
- badPurgeUploadConfig("interval is not a string")
- }
-
- intervalDuration, err = time.ParseDuration(intervalStr)
- if err != nil {
-
badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) - } - } else { - badPurgeUploadConfig("interval missing") - } - - var dryRunBool bool - dryRun, ok := config["dryrun"] - if ok { - dryRunBool, ok = dryRun.(bool) - if !ok { - badPurgeUploadConfig("cannot parse dryrun") - } - } else { - badPurgeUploadConfig("dryrun missing") - } - - go func() { - rand.Seed(time.Now().Unix()) - jitter := time.Duration(rand.Int()%60) * time.Minute - log.Infof("Starting upload purge in %s", jitter) - time.Sleep(jitter) - - for { - storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) - log.Infof("Starting upload purge in %s", intervalDuration) - time.Sleep(intervalDuration) - } - }() -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/app_test.go b/vendor/github.com/docker/distribution/registry/handlers/app_test.go deleted file mode 100644 index 6f597527..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/app_test.go +++ /dev/null @@ -1,277 +0,0 @@ -package handlers - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" - _ "github.com/docker/distribution/registry/auth/silly" - "github.com/docker/distribution/registry/storage" - memorycache "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "golang.org/x/net/context" -) - -// TestAppDispatcher builds an application with a test dispatcher and ensures -// that requests are properly dispatched and the handlers are constructed. -// This only tests the dispatch mechanism. The underlying dispatchers must be -// tested individually. 
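- // The var-checking dispatcher below asserts that the mux vars extracted
- // for each registered route match what the test case expects.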
-func TestAppDispatcher(t *testing.T) { - driver := inmemory.New() - ctx := context.Background() - app := &App{ - Config: configuration.Configuration{}, - Context: ctx, - router: v2.Router(), - driver: driver, - registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true, true, false), - } - server := httptest.NewServer(app) - router := v2.Router() - - serverURL, err := url.Parse(server.URL) - if err != nil { - t.Fatalf("error parsing server url: %v", err) - } - - varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { - return func(ctx *Context, r *http.Request) http.Handler { - // Always checks the same name context - if ctx.Repository.Name() != getName(ctx) { - t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") - } - - // Check that we have all that is expected - for expectedK, expectedV := range expectedVars { - if ctx.Value(expectedK) != expectedV { - t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) - } - } - - // Check that we only have variables that are expected - for k, v := range ctx.Value("vars").(map[string]string) { - _, ok := expectedVars[k] - - if !ok { // name is checked on context - // We have an unexpected key, fail - t.Fatalf("unexpected key %q in vars with value %q", k, v) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - } - } - - // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string - unflatten := func(vars []string) map[string]string { - m := make(map[string]string) - for i := 0; i < len(vars)-1; i = i + 2 { - m[vars[i]] = vars[i+1] - } - - return m - } - - for _, testcase := range []struct { - endpoint string - vars []string - }{ - { - endpoint: v2.RouteNameManifest, - vars: []string{ - "name", "foo/bar", - "reference", "sometag", - }, - }, - { - endpoint: v2.RouteNameTags, - vars: []string{ - "name", "foo/bar", - }, - }, - { - endpoint: v2.RouteNameBlob, - vars: []string{ - "name", "foo/bar", - "digest", "tarsum.v1+bogus:abcdef0123456789", - }, - }, - { - endpoint: v2.RouteNameBlobUpload, - vars: []string{ - "name", "foo/bar", - }, - }, - { - endpoint: v2.RouteNameBlobUploadChunk, - vars: []string{ - "name", "foo/bar", - "uuid", "theuuid", - }, - }, - } { - app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) - route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) - u, err := route.URL(testcase.vars...) - - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(u.String()) - - if err != nil { - t.Fatal(err) - } - - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) - } - } -} - -// TestNewApp covers the creation of an application via NewApp with a -// configuration. -func TestNewApp(t *testing.T) { - ctx := context.Background() - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": nil, - }, - Auth: configuration.Auth{ - // For now, we simply test that new auth results in a viable - // application. - "silly": { - "realm": "realm-test", - "service": "service-test", - }, - }, - } - - // Mostly, with this test, given a sane configuration, we are simply - // ensuring that NewApp doesn't panic. We might want to tweak this - // behavior. 
- app := NewApp(ctx, config) - - server := httptest.NewServer(app) - builder, err := v2.NewURLBuilderFromString(server.URL) - if err != nil { - t.Fatalf("error creating urlbuilder: %v", err) - } - - baseURL, err := builder.BuildBaseURL() - if err != nil { - t.Fatalf("error creating baseURL: %v", err) - } - - // TODO(stevvooe): The rest of this test might belong in the API tests. - - // Just hit the app and make sure we get a 401 Unauthorized error. - req, err := http.Get(baseURL) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer req.Body.Close() - - if req.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected status code during request: %v", err) - } - - if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { - t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") - } - - expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" - if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { - t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) - } - - var errs errcode.Errors - dec := json.NewDecoder(req.Body) - if err := dec.Decode(&errs); err != nil { - t.Fatalf("error decoding error response: %v", err) - } - - err2, ok := errs[0].(errcode.ErrorCoder) - if !ok { - t.Fatalf("not an ErrorCoder: %#v", errs[0]) - } - if err2.ErrorCode() != v2.ErrorCodeUnauthorized { - t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), v2.ErrorCodeUnauthorized) - } -} - -// Test the access record accumulator -func TestAppendAccessRecords(t *testing.T) { - repo := "testRepo" - - expectedResource := auth.Resource{ - Type: "repository", - Name: repo, - } - - expectedPullRecord := auth.Access{ - Resource: expectedResource, - Action: "pull", - } - expectedPushRecord := auth.Access{ - Resource: expectedResource, - Action: "push", - } - expectedAllRecord := auth.Access{ - Resource: expectedResource, - Action: "*", - } - - records := []auth.Access{} - result := appendAccessRecords(records, "GET", repo) - expectedResult := []auth.Access{expectedPullRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "HEAD", repo) - expectedResult = []auth.Access{expectedPullRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "POST", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "PUT", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "PATCH", repo) - expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - t.Fatalf("Actual access record differs from expected") - } - - records = []auth.Access{} - result = appendAccessRecords(records, "DELETE", repo) - expectedResult = []auth.Access{expectedAllRecord} - if ok := reflect.DeepEqual(result, expectedResult); !ok { - 
t.Fatalf("Actual access record differs from expected") - } - -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/basicauth.go b/vendor/github.com/docker/distribution/registry/handlers/basicauth.go deleted file mode 100644 index 8727a3cd..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/basicauth.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.4 - -package handlers - -import ( - "net/http" -) - -func basicAuth(r *http.Request) (username, password string, ok bool) { - return r.BasicAuth() -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go b/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go deleted file mode 100644 index 6cf10a25..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/basicauth_prego14.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !go1.4 - -package handlers - -import ( - "encoding/base64" - "net/http" - "strings" -) - -// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we -// can compile on go1.3 and earlier. - -// BasicAuth returns the username and password provided in the request's -// Authorization header, if the request uses HTTP Basic Authentication. -// See RFC 2617, Section 2. -func basicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - return parseBasicAuth(auth) -} - -// parseBasicAuth parses an HTTP Basic Authentication string. -// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). -func parseBasicAuth(auth string) (username, password string, ok bool) { - if !strings.HasPrefix(auth, "Basic ") { - return - } - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/blob.go b/vendor/github.com/docker/distribution/registry/handlers/blob.go deleted file mode 100644 index b7c06ea2..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/blob.go +++ /dev/null @@ -1,93 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// blobDispatcher uses the request context to build a blobHandler. -func blobDispatcher(ctx *Context, r *http.Request) http.Handler { - dgst, err := getDigest(ctx) - if err != nil { - - if err == errDigestNotAvailable { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - }) - } - - blobHandler := &blobHandler{ - Context: ctx, - Digest: dgst, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(blobHandler.GetBlob), - "HEAD": http.HandlerFunc(blobHandler.GetBlob), - "DELETE": http.HandlerFunc(blobHandler.DeleteBlob), - } -} - -// blobHandler serves http blob requests. -type blobHandler struct { - *Context - - Digest digest.Digest -} - -// GetBlob fetches the binary data from backend storage returns it in the -// response. 
-func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { - context.GetLogger(bh).Debug("GetBlob") - blobs := bh.Repository.Blobs(bh) - desc, err := blobs.Stat(bh, bh.Digest) - if err != nil { - if err == distribution.ErrBlobUnknown { - bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) - } else { - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { - context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// DeleteBlob deletes a layer blob -func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { - context.GetLogger(bh).Debug("DeleteBlob") - - blobs := bh.Repository.Blobs(bh) - err := blobs.Delete(bh, bh.Digest) - if err != nil { - switch err { - case distribution.ErrBlobUnknown: - w.WriteHeader(http.StatusNotFound) - bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) - case distribution.ErrUnsupported: - w.WriteHeader(http.StatusMethodNotAllowed) - bh.Errors = append(bh.Errors, v2.ErrorCodeUnsupported) - default: - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown) - } - return - } - - w.Header().Set("Content-Length", "0") - w.WriteHeader(http.StatusAccepted) -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/blobupload.go b/vendor/github.com/docker/distribution/registry/handlers/blobupload.go deleted file mode 100644 index 1d1c1009..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/blobupload.go +++ /dev/null @@ -1,327 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "net/url" - "os" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// blobUploadDispatcher constructs and returns the blob upload handler for the -// given request context. 
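The dispatchers being removed here all hand a verb-to-handler map to gorilla's handlers.MethodHandler. A minimal stdlib-only stand-in for that pattern, as a sketch (the route and handlers are illustrative; gorilla's version additionally sets an Allow header listing the mapped methods):

package main

import "net/http"

// methodHandler routes by HTTP verb and answers 405 Method Not Allowed
// for anything unmapped, mirroring the dispatch done above.
type methodHandler map[string]http.Handler

func (mh methodHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if h, ok := mh[r.Method]; ok {
		h.ServeHTTP(w, r)
		return
	}
	w.WriteHeader(http.StatusMethodNotAllowed)
}

func main() {
	http.Handle("/blob", methodHandler{
		"GET":  http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("got blob\n")) }),
		"HEAD": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
	})
	http.ListenAndServe(":8080", nil)
}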
-func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
-	buh := &blobUploadHandler{
-		Context: ctx,
-		UUID:    getUploadUUID(ctx),
-	}
-
-	handler := http.Handler(handlers.MethodHandler{
-		"POST":   http.HandlerFunc(buh.StartBlobUpload),
-		"GET":    http.HandlerFunc(buh.GetUploadStatus),
-		"HEAD":   http.HandlerFunc(buh.GetUploadStatus),
-		"PATCH":  http.HandlerFunc(buh.PatchBlobData),
-		"PUT":    http.HandlerFunc(buh.PutBlobUploadComplete),
-		"DELETE": http.HandlerFunc(buh.CancelBlobUpload),
-	})
-
-	if buh.UUID != "" {
-		state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
-		if err != nil {
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
-				buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
-			})
-		}
-		buh.State = state
-
-		if state.Name != ctx.Repository.Name() {
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name())
-				buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
-			})
-		}
-
-		if state.UUID != buh.UUID {
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
-				buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
-			})
-		}
-
-		blobs := ctx.Repository.Blobs(buh)
-		upload, err := blobs.Resume(buh, buh.UUID)
-		if err != nil {
-			ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
-			if err == distribution.ErrBlobUploadUnknown {
-				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-					buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err))
-				})
-			}
-
-			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
-			})
-		}
-		buh.Upload = upload
-
-		if state.Offset > 0 {
-			// Seek the blob upload to the correct spot if it's non-zero.
-			// These error conditions should be rare and demonstrate real
-			// problems. We basically cancel the upload and tell the client to
-			// start over.
-			if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil {
-				defer upload.Close()
-				ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err)
-				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-					buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
-					upload.Cancel(buh)
-				})
-			} else if nn != buh.State.Offset {
-				defer upload.Close()
-				ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, buh.State.Offset)
-				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-					buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err))
-					upload.Cancel(buh)
-				})
-			}
-		}
-
-		handler = closeResources(handler, buh.Upload)
-	}
-
-	return handler
-}
-
-// blobUploadHandler handles the http blob upload process.
-type blobUploadHandler struct {
-	*Context
-
-	// UUID identifies the upload instance for the current request. Using UUID
-	// to key blob writers since this implementation uses UUIDs.
-	UUID string
-
-	Upload distribution.BlobWriter
-
-	State blobUploadState
-}
-
-// StartBlobUpload begins the blob upload process and allocates a server-side
-// blob writer session.
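The `_state` token consumed by the dispatcher above is produced by the hmacKey helper removed further down in this patch: the JSON-serialized upload state is signed with HMAC-SHA256 and the MAC is prepended before URL-safe base64 encoding, so the server can hand the state to the client without trusting it on the way back. A self-contained sketch of that round trip (type and field names follow the deleted code; the secret and values are illustrative):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
)

type uploadState struct {
	Name   string
	UUID   string
	Offset int64
}

// pack signs the JSON-encoded state and prepends the MAC, mirroring
// hmacKey.packUploadState in the deleted hmac.go.
func pack(secret []byte, s uploadState) (string, error) {
	p, err := json.Marshal(s)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(p)
	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
}

// unpack verifies the MAC before trusting the embedded state.
func unpack(secret []byte, token string) (uploadState, error) {
	var s uploadState
	raw, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return s, err
	}
	mac := hmac.New(sha256.New, secret)
	if len(raw) < mac.Size() {
		return s, errors.New("invalid token")
	}
	sig, msg := raw[:mac.Size()], raw[mac.Size():]
	mac.Write(msg)
	if !hmac.Equal(mac.Sum(nil), sig) {
		return s, errors.New("invalid token")
	}
	return s, json.Unmarshal(msg, &s)
}

func main() {
	secret := []byte("supersecret")
	tok, _ := pack(secret, uploadState{Name: "foo/bar", UUID: "abcd", Offset: 42})
	s, err := unpack(secret, tok)
	fmt.Println(s, err)
}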
-func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { - blobs := buh.Repository.Blobs(buh) - upload, err := blobs.Create(buh) - if err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - buh.Upload = upload - defer buh.Upload.Close() - - if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) - w.WriteHeader(http.StatusAccepted) -} - -// GetUploadStatus returns the status of a given upload, identified by id. -func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - // TODO(dmcgowan): Set last argument to false in blobUploadResponse when - // resumable upload is supported. This will enable returning a non-zero - // range for clients to begin uploading at an offset. - if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - w.WriteHeader(http.StatusNoContent) -} - -// PatchBlobData writes data to an upload. -func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - ct := r.Header.Get("Content-Type") - if ct != "" && ct != "application/octet-stream" { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) - // TODO(dmcgowan): encode error - return - } - - // TODO(dmcgowan): support Content-Range header to seek and write range - - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - if err := buh.blobUploadResponse(w, r, false); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.WriteHeader(http.StatusAccepted) -} - -// PutBlobUploadComplete takes the final request of a blob upload. The -// request may include all the blob data or no blob data. Any data -// provided is received and verified. If successful, the blob is linked -// into the blob store and 201 Created is returned with the canonical -// url of the blob. -func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! - - if dgstStr == "" { - // no digest? return error, but allow retry. - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) - return - } - - dgst, err := digest.ParseDigest(dgstStr) - if err != nil { - // no digest? return error, but allow retry. - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) - return - } - - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ - Digest: dgst, - - // TODO(stevvooe): This isn't wildly important yet, but we should - // really set the length and mediatype. 
For now, we can let the - // backend take care of this. - }) - - if err != nil { - switch err := err.(type) { - case distribution.ErrBlobInvalidDigest: - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - default: - switch err { - case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - default: - ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - } - - // Clean up the backend blob data if there was an error. - if err := buh.Upload.Cancel(buh); err != nil { - // If the cleanup fails, all we can do is observe and report. - ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) - } - - return - } - - // Build our canonical blob url - blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) - if err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Location", blobURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - w.WriteHeader(http.StatusCreated) -} - -// CancelBlobUpload cancels an in-progress upload of a blob. -func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { - if buh.Upload == nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) - return - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - if err := buh.Upload.Cancel(buh); err != nil { - ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - w.WriteHeader(http.StatusNoContent) -} - -// blobUploadResponse provides a standard request for uploading blobs and -// chunk responses. This sets the correct headers but the response status is -// left to the caller. The fresh argument is used to ensure that new blob -// uploads always start at a 0 offset. This allows disabling resumable push by -// always returning a 0 offset on check status. -func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { - - var offset int64 - if !fresh { - var err error - offset, err = buh.Upload.Seek(0, os.SEEK_CUR) - if err != nil { - ctxu.GetLogger(buh).Errorf("unable get current offset of blob upload: %v", err) - return err - } - } - - // TODO(stevvooe): Need a better way to manage the upload state automatically. 
- buh.State.Name = buh.Repository.Name() - buh.State.UUID = buh.Upload.ID() - buh.State.Offset = offset - buh.State.StartedAt = buh.Upload.StartedAt() - - token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) - if err != nil { - ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) - return err - } - - uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( - buh.Repository.Name(), buh.Upload.ID(), - url.Values{ - "_state": []string{token}, - }) - if err != nil { - ctxu.GetLogger(buh).Infof("error building upload url: %s", err) - return err - } - - endRange := offset - if endRange > 0 { - endRange = endRange - 1 - } - - w.Header().Set("Docker-Upload-UUID", buh.UUID) - w.Header().Set("Location", uploadURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/catalog.go b/vendor/github.com/docker/distribution/registry/handlers/catalog.go deleted file mode 100644 index 6ec1fe55..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/catalog.go +++ /dev/null @@ -1,95 +0,0 @@ -package handlers - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/gorilla/handlers" -) - -const maximumReturnedEntries = 100 - -func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { - catalogHandler := &catalogHandler{ - Context: ctx, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(catalogHandler.GetCatalog), - } -} - -type catalogHandler struct { - *Context -} - -type catalogAPIResponse struct { - Repositories []string `json:"repositories"` -} - -func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { - var moreEntries = true - - q := r.URL.Query() - lastEntry := q.Get("last") - maxEntries, err := strconv.Atoi(q.Get("n")) - if err != nil || maxEntries < 0 { - maxEntries = maximumReturnedEntries - } - - repos := make([]string, maxEntries) - - filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) - if err == io.EOF { - moreEntries = false - } else if err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - // Add a link header if there are more entries to retrieve - if moreEntries { - lastEntry = repos[len(repos)-1] - urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) - if err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } - w.Header().Set("Link", urlStr) - } - - enc := json.NewEncoder(w) - if err := enc.Encode(catalogAPIResponse{ - Repositories: repos[0:filled], - }); err != nil { - ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} - -// Use the original URL from the request to create a new URL for -// the link header -func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { - calledURL, err := url.Parse(origURL) - if err != nil { - return "", err - } - - v := url.Values{} - v.Add("n", strconv.Itoa(maxEntries)) - v.Add("last", lastEntry) - - calledURL.RawQuery = v.Encode() - - calledURL.Fragment = "" - urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) - - return urlStr, nil -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/context.go 
b/vendor/github.com/docker/distribution/registry/handlers/context.go deleted file mode 100644 index 85a17123..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/context.go +++ /dev/null @@ -1,151 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "sync" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "golang.org/x/net/context" -) - -// Context should contain the request specific context for use in across -// handlers. Resources that don't need to be shared across handlers should not -// be on this object. -type Context struct { - // App points to the application structure that created this context. - *App - context.Context - - // Repository is the repository for the current request. All requests - // should be scoped to a single repository. This field may be nil. - Repository distribution.Repository - - // Errors is a collection of errors encountered during the request to be - // returned to the client API. If errors are added to the collection, the - // handler *must not* start the response via http.ResponseWriter. - Errors errcode.Errors - - urlBuilder *v2.URLBuilder - - // TODO(stevvooe): The goal is too completely factor this context and - // dispatching out of the web application. Ideally, we should lean on - // context.Context for injection of these resources. -} - -// Value overrides context.Context.Value to ensure that calls are routed to -// correct context. -func (ctx *Context) Value(key interface{}) interface{} { - return ctx.Context.Value(key) -} - -func getName(ctx context.Context) (name string) { - return ctxu.GetStringValue(ctx, "vars.name") -} - -func getReference(ctx context.Context) (reference string) { - return ctxu.GetStringValue(ctx, "vars.reference") -} - -var errDigestNotAvailable = fmt.Errorf("digest not available in context") - -func getDigest(ctx context.Context) (dgst digest.Digest, err error) { - dgstStr := ctxu.GetStringValue(ctx, "vars.digest") - - if dgstStr == "" { - ctxu.GetLogger(ctx).Errorf("digest not available") - return "", errDigestNotAvailable - } - - d, err := digest.ParseDigest(dgstStr) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) - return "", err - } - - return d, nil -} - -func getUploadUUID(ctx context.Context) (uuid string) { - return ctxu.GetStringValue(ctx, "vars.uuid") -} - -// getUserName attempts to resolve a username from the context and request. If -// a username cannot be resolved, the empty string is returned. -func getUserName(ctx context.Context, r *http.Request) string { - username := ctxu.GetStringValue(ctx, "auth.user.name") - - // Fallback to request user with basic auth - if username == "" { - var ok bool - uname, _, ok := basicAuth(r) - if ok { - username = uname - } - } - - return username -} - -// contextManager allows us to associate net/context.Context instances with a -// request, based on the memory identity of http.Request. This prepares http- -// level context, which is not application specific. If this is called, -// (*contextManager).release must be called on the context when the request is -// completed. -// -// Providing this circumvents a lot of necessity for dispatchers with the -// benefit of instantiating the request context much earlier. -// -// TODO(stevvooe): Consider making this facility a part of the context package. 
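The contextManager deleted below keys per-request contexts on the *http.Request pointer because, when this code was written, net/http carried no context of its own. Later Go releases (1.7 and up) attach a context to the request directly, which removes the need for the external map and mutex; a hedged sketch of that alternative (middleware name and key are illustrative):

package main

import (
	"context"
	"fmt"
	"net/http"
)

type ctxKey string

// withRequestID attaches a value to the request's own context instead of
// tracking contexts in a map guarded by a mutex, as contextManager must.
func withRequestID(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), ctxKey("request.id"), "example-id")
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func main() {
	h := withRequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, r.Context().Value(ctxKey("request.id")))
	}))
	http.ListenAndServe(":8080", h)
}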
-type contextManager struct {
-	contexts map[*http.Request]context.Context
-	mu       sync.Mutex
-}
-
-// defaultContextManager is just a global instance to register request contexts.
-var defaultContextManager = newContextManager()
-
-func newContextManager() *contextManager {
-	return &contextManager{
-		contexts: make(map[*http.Request]context.Context),
-	}
-}
-
-// context either returns a new context or looks it up in the manager.
-func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context {
-	cm.mu.Lock()
-	defer cm.mu.Unlock()
-
-	ctx, ok := cm.contexts[r]
-	if ok {
-		return ctx
-	}
-
-	if parent == nil {
-		parent = ctxu.Background()
-	}
-
-	ctx = ctxu.WithRequest(parent, r)
-	ctx, w = ctxu.WithResponseWriter(ctx, w)
-	ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
-	cm.contexts[r] = ctx
-
-	return ctx
-}
-
-// release frees any resources associated with the request in the manager.
-func (cm *contextManager) release(ctx context.Context) {
-	cm.mu.Lock()
-	defer cm.mu.Unlock()
-
-	r, err := ctxu.GetRequest(ctx)
-	if err != nil {
-		ctxu.GetLogger(ctx).Errorf("no request found in context during release")
-		return
-	}
-	delete(cm.contexts, r)
-}
diff --git a/vendor/github.com/docker/distribution/registry/handlers/helpers.go b/vendor/github.com/docker/distribution/registry/handlers/helpers.go
deleted file mode 100644
index 1f9a8ee1..00000000
--- a/vendor/github.com/docker/distribution/registry/handlers/helpers.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package handlers
-
-import (
-	"errors"
-	"io"
-	"net/http"
-
-	ctxu "github.com/docker/distribution/context"
-	"github.com/docker/distribution/registry/api/errcode"
-)
-
-// closeResources closes all the provided resources after running the target
-// handler.
-func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		for _, closer := range closers {
-			defer closer.Close()
-		}
-		handler.ServeHTTP(w, r)
-	})
-}
-
-// copyFullPayload copies the payload of an HTTP request to destWriter. If it
-// receives less content than expected, and the client disconnected during the
-// upload, it avoids sending a 400 error to keep the logs cleaner.
-func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error {
-	// Get a channel that tells us if the client disconnects
-	var clientClosed <-chan bool
-	if notifier, ok := responseWriter.(http.CloseNotifier); ok {
-		clientClosed = notifier.CloseNotify()
-	} else {
-		panic("the ResponseWriter does not implement CloseNotifier")
-	}
-
-	// Read in the data, if any.
-	copied, err := io.Copy(destWriter, r.Body)
-	if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
-		// Didn't receive as much content as expected. Did the client
-		// disconnect during the request? If so, avoid returning a 400
-		// error to keep the logs cleaner.
-		select {
-		case <-clientClosed:
-			// Set the response code to "499 Client Closed Request"
-			// Even though the connection has already been closed,
-			// this causes the logger to pick up a 499 error
-			// instead of showing 0 for the HTTP status.
- responseWriter.WriteHeader(499) - - ctxu.GetLogger(context).Error("client disconnected during " + action) - return errors.New("client disconnected") - default: - } - } - - if err != nil { - ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) - *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) - return err - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hmac.go b/vendor/github.com/docker/distribution/registry/handlers/hmac.go deleted file mode 100644 index 1725d240..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/hmac.go +++ /dev/null @@ -1,72 +0,0 @@ -package handlers - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - "time" -) - -// blobUploadState captures the state serializable state of the blob upload. -type blobUploadState struct { - // name is the primary repository under which the blob will be linked. - Name string - - // UUID identifies the upload. - UUID string - - // offset contains the current progress of the upload. - Offset int64 - - // StartedAt is the original start time of the upload. - StartedAt time.Time -} - -type hmacKey string - -// unpackUploadState unpacks and validates the blob upload state from the -// token, using the hmacKey secret. -func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { - var state blobUploadState - - tokenBytes, err := base64.URLEncoding.DecodeString(token) - if err != nil { - return state, err - } - mac := hmac.New(sha256.New, []byte(secret)) - - if len(tokenBytes) < mac.Size() { - return state, fmt.Errorf("Invalid token") - } - - macBytes := tokenBytes[:mac.Size()] - messageBytes := tokenBytes[mac.Size():] - - mac.Write(messageBytes) - if !hmac.Equal(mac.Sum(nil), macBytes) { - return state, fmt.Errorf("Invalid token") - } - - if err := json.Unmarshal(messageBytes, &state); err != nil { - return state, err - } - - return state, nil -} - -// packUploadState packs the upload state signed with and hmac digest using -// the hmacKey secret, encoding to url safe base64. The resulting token can be -// used to share data with minimized risk of external tampering. -func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { - mac := hmac.New(sha256.New, []byte(secret)) - p, err := json.Marshal(lus) - if err != nil { - return "", err - } - - mac.Write(p) - - return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go b/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go deleted file mode 100644 index 366c7279..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/hmac_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package handlers - -import "testing" - -var blobUploadStates = []blobUploadState{ - { - Name: "hello", - UUID: "abcd-1234-qwer-0987", - Offset: 0, - }, - { - Name: "hello-world", - UUID: "abcd-1234-qwer-0987", - Offset: 0, - }, - { - Name: "h3ll0_w0rld", - UUID: "abcd-1234-qwer-0987", - Offset: 1337, - }, - { - Name: "ABCDEFG", - UUID: "ABCD-1234-QWER-0987", - Offset: 1234567890, - }, - { - Name: "this-is-A-sort-of-Long-name-for-Testing", - UUID: "dead-1234-beef-0987", - Offset: 8675309, - }, -} - -var secrets = []string{ - "supersecret", - "12345", - "a", - "SuperSecret", - "Sup3r... 
S3cr3t!", - "This is a reasonably long secret key that is used for the purpose of testing.", - "\u2603+\u2744", // snowman+snowflake -} - -// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and -// validates that the tokens can be used to reconstruct the proper upload state. -func TestLayerUploadTokens(t *testing.T) { - secret := hmacKey("supersecret") - - for _, testcase := range blobUploadStates { - token, err := secret.packUploadState(testcase) - if err != nil { - t.Fatal(err) - } - - lus, err := secret.unpackUploadState(token) - if err != nil { - t.Fatal(err) - } - - assertBlobUploadStateEquals(t, testcase, lus) - } -} - -// TestHMACValidate ensures that any HMAC token providers are compatible if and -// only if they share the same secret. -func TestHMACValidation(t *testing.T) { - for _, secret := range secrets { - secret1 := hmacKey(secret) - secret2 := hmacKey(secret) - badSecret := hmacKey("DifferentSecret") - - for _, testcase := range blobUploadStates { - token, err := secret1.packUploadState(testcase) - if err != nil { - t.Fatal(err) - } - - lus, err := secret2.unpackUploadState(token) - if err != nil { - t.Fatal(err) - } - - assertBlobUploadStateEquals(t, testcase, lus) - - _, err = badSecret.unpackUploadState(token) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) - } - - badToken, err := badSecret.packUploadState(lus) - if err != nil { - t.Fatal(err) - } - - _, err = secret1.unpackUploadState(badToken) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) - } - - _, err = secret2.unpackUploadState(badToken) - if err == nil { - t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) - } - } - } -} - -func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) { - if expected.Name != received.Name { - t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) - } - if expected.UUID != received.UUID { - t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID) - } - if expected.Offset != received.Offset { - t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset) - } -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/hooks.go b/vendor/github.com/docker/distribution/registry/handlers/hooks.go deleted file mode 100644 index 7bbab4f8..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/hooks.go +++ /dev/null @@ -1,53 +0,0 @@ -package handlers - -import ( - "bytes" - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Sirupsen/logrus" -) - -// logHook is for hooking Panic in web application -type logHook struct { - LevelsParam []string - Mail *mailer -} - -// Fire forwards an error to LogHook -func (hook *logHook) Fire(entry *logrus.Entry) error { - addr := strings.Split(hook.Mail.Addr, ":") - if len(addr) != 2 { - return errors.New("Invalid Mail Address") - } - host := addr[0] - subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) - - html := ` - {{.Message}} - - {{range $key, $value := .Data}} - {{$key}}: {{$value}} - {{end}} - ` - b := bytes.NewBuffer(make([]byte, 0)) - t := template.Must(template.New("mail body").Parse(html)) - if err := t.Execute(b, entry); err != nil { - return err - } - body := fmt.Sprintf("%s", b) - - return hook.Mail.sendMail(subject, body) -} - -// Levels contains hook levels to be catched -func (hook *logHook) Levels() 
[]logrus.Level { - levels := []logrus.Level{} - for _, v := range hook.LevelsParam { - lv, _ := logrus.ParseLevel(v) - levels = append(levels, lv) - } - return levels -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/images.go b/vendor/github.com/docker/distribution/registry/handlers/images.go deleted file mode 100644 index dbe7b706..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/images.go +++ /dev/null @@ -1,251 +0,0 @@ -package handlers - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" - "golang.org/x/net/context" -) - -// imageManifestDispatcher takes the request context and builds the -// appropriate handler for handling image manifest requests. -func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { - imageManifestHandler := &imageManifestHandler{ - Context: ctx, - } - reference := getReference(ctx) - dgst, err := digest.ParseDigest(reference) - if err != nil { - // We just have a tag - imageManifestHandler.Tag = reference - } else { - imageManifestHandler.Digest = dgst - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), - "PUT": http.HandlerFunc(imageManifestHandler.PutImageManifest), - "DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest), - } -} - -// imageManifestHandler handles http operations on image manifests. -type imageManifestHandler struct { - *Context - - // One of tag or digest gets set, depending on what is present in context. - Tag string - Digest digest.Digest -} - -// GetImageManifest fetches the image manifest from the storage backend, if it exists. -func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("GetImageManifest") - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - var sm *manifest.SignedManifest - if imh.Tag != "" { - sm, err = manifests.GetByTag(imh.Tag) - } else { - if etagMatch(r, imh.Digest.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - sm, err = manifests.Get(imh.Digest) - } - - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - - // Get the digest, if we don't already have it. - if imh.Digest == "" { - dgst, err := digestManifest(imh, sm) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - return - } - if etagMatch(r, dgst.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - - imh.Digest = dgst - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw))) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) - w.Write(sm.Raw) -} - -func etagMatch(r *http.Request, etag string) bool { - for _, headerVal := range r.Header["If-None-Match"] { - if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted - return true - } - } - return false -} - -// PutImageManifest validates and stores and image in the registry. 
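GetImageManifest above honors If-None-Match so clients can revalidate manifests they already hold by digest. A minimal sketch of that conditional-GET handshake (the handler, route, and digest value are illustrative, not from this patch):

package main

import (
	"fmt"
	"net/http"
)

// serveWithEtag answers 304 Not Modified when the client already holds the
// current representation, mirroring the etagMatch check in the deleted
// images.go, which accepts both quoted and unquoted tags.
func serveWithEtag(w http.ResponseWriter, r *http.Request, etag string, body []byte) {
	for _, v := range r.Header["If-None-Match"] {
		if v == etag || v == fmt.Sprintf("%q", etag) {
			w.WriteHeader(http.StatusNotModified)
			return
		}
	}
	w.Header().Set("Etag", fmt.Sprintf("%q", etag))
	w.Write(body)
}

func main() {
	etag := "sha256:deadbeef" // illustrative digest value
	http.HandleFunc("/manifest", func(w http.ResponseWriter, r *http.Request) {
		serveWithEtag(w, r, etag, []byte(`{"example":true}`))
	})
	http.ListenAndServe(":8080", nil)
}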
-func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("PutImageManifest") - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - var jsonBuf bytes.Buffer - if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { - // copyFullPayload reports the error if necessary - return - } - - var manifest manifest.SignedManifest - if err := json.Unmarshal(jsonBuf.Bytes(), &manifest); err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return - } - - dgst, err := digestManifest(imh, &manifest) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - return - } - - // Validate manifest tag or digest matches payload - if imh.Tag != "" { - if manifest.Tag != imh.Tag { - ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag) - imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid) - return - } - - imh.Digest = dgst - } else if imh.Digest != "" { - if dgst != imh.Digest { - ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", dgst, imh.Digest) - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - return - } - } else { - imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) - return - } - - if err := manifests.Put(&manifest); err != nil { - // TODO(stevvooe): These error handling switches really need to be - // handled by an app global mapper. - switch err := err.(type) { - case distribution.ErrManifestVerification: - for _, verificationError := range err { - switch verificationError := verificationError.(type) { - case distribution.ErrManifestBlobUnknown: - imh.Errors = append(imh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(verificationError.Digest)) - case distribution.ErrManifestUnverified: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) - default: - if verificationError == digest.ErrDigestInvalidFormat { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) - } - } - } - default: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - - return - } - - // Construct a canonical url for the uploaded manifest. - location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String()) - if err != nil { - // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to - // happen. We'll log the error here but proceed as if it worked. Worst - // case, we set an empty location header. - ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) - } - - w.Header().Set("Location", location) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.WriteHeader(http.StatusCreated) -} - -// DeleteImageManifest removes the manifest with the given digest from the registry. 
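PutImageManifest above recomputes the payload digest and rejects uploads whose client-supplied digest disagrees; that check is what makes the registry content-addressable. A stripped-down sketch of the comparison (sha256 only; the real code supports multiple algorithms through the digest package):

package main

import (
	"crypto/sha256"
	"fmt"
)

// verifyDigest recomputes a sha256 content digest and compares it with the
// digest the client claimed, as PutImageManifest does for manifests.
func verifyDigest(payload []byte, claimed string) error {
	actual := fmt.Sprintf("sha256:%x", sha256.Sum256(payload))
	if actual != claimed {
		return fmt.Errorf("payload digest does not match: %s != %s", actual, claimed)
	}
	return nil
}

func main() {
	manifest := []byte(`{"name":"foo/bar","tag":"latest"}`)
	claimed := fmt.Sprintf("sha256:%x", sha256.Sum256(manifest))
	fmt.Println(verifyDigest(manifest, claimed)) // <nil>
}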
-func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("DeleteImageManifest") - - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - err = manifests.Delete(imh.Digest) - if err != nil { - switch err { - case digest.ErrDigestUnsupported: - case digest.ErrDigestInvalidFormat: - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) - return - case distribution.ErrBlobUnknown: - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) - w.WriteHeader(http.StatusNotFound) - return - case distribution.ErrUnsupported: - imh.Errors = append(imh.Errors, v2.ErrorCodeUnsupported) - w.WriteHeader(http.StatusMethodNotAllowed) - default: - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) - w.WriteHeader(http.StatusBadRequest) - return - } - } - - w.WriteHeader(http.StatusAccepted) -} - -// digestManifest takes a digest of the given manifest. This belongs somewhere -// better but we'll wait for a refactoring cycle to find that real somewhere. -func digestManifest(ctx context.Context, sm *manifest.SignedManifest) (digest.Digest, error) { - p, err := sm.Payload() - if err != nil { - if !strings.Contains(err.Error(), "missing signature key") { - ctxu.GetLogger(ctx).Errorf("error getting manifest payload: %v", err) - return "", err - } - - // NOTE(stevvooe): There are no signatures but we still have a - // payload. The request will fail later but this is not the - // responsibility of this part of the code. - p = sm.Raw - } - - dgst, err := digest.FromBytes(p) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error digesting manifest: %v", err) - return "", err - } - - return dgst, err -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/mail.go b/vendor/github.com/docker/distribution/registry/handlers/mail.go deleted file mode 100644 index 39244909..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/mail.go +++ /dev/null @@ -1,45 +0,0 @@ -package handlers - -import ( - "errors" - "net/smtp" - "strings" -) - -// mailer provides fields of email configuration for sending. -type mailer struct { - Addr, Username, Password, From string - Insecure bool - To []string -} - -// sendMail allows users to send email, only if mail parameters is configured correctly. -func (mail *mailer) sendMail(subject, message string) error { - addr := strings.Split(mail.Addr, ":") - if len(addr) != 2 { - return errors.New("Invalid Mail Address") - } - host := addr[0] - msg := []byte("To:" + strings.Join(mail.To, ";") + - "\r\nFrom: " + mail.From + - "\r\nSubject: " + subject + - "\r\nContent-Type: text/plain\r\n\r\n" + - message) - auth := smtp.PlainAuth( - "", - mail.Username, - mail.Password, - host, - ) - err := smtp.SendMail( - mail.Addr, - auth, - mail.From, - mail.To, - []byte(msg), - ) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/handlers/tags.go b/vendor/github.com/docker/distribution/registry/handlers/tags.go deleted file mode 100644 index 54725585..00000000 --- a/vendor/github.com/docker/distribution/registry/handlers/tags.go +++ /dev/null @@ -1,64 +0,0 @@ -package handlers - -import ( - "encoding/json" - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// tagsDispatcher constructs the tags handler api endpoint. 
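Both the mailer above and the logHook earlier split Addr on ":" by hand and reject anything that does not yield exactly two parts. net.SplitHostPort is the more robust standard-library route, since it also copes with IPv6 literals; a small sketch (the addresses are illustrative):

package main

import (
	"fmt"
	"net"
)

func main() {
	// A bare strings.Split(addr, ":") check, as in the deleted mailer,
	// would mis-handle the IPv6 address below; net.SplitHostPort does not.
	for _, addr := range []string{"smtp.example.com:587", "[::1]:25"} {
		host, port, err := net.SplitHostPort(addr)
		fmt.Println(host, port, err)
	}
}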
-func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { - tagsHandler := &tagsHandler{ - Context: ctx, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(tagsHandler.GetTags), - } -} - -// tagsHandler handles requests for lists of tags under a repository name. -type tagsHandler struct { - *Context -} - -type tagsAPIResponse struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// GetTags returns a json list of tags for a specific image name. -func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - manifests, err := th.Repository.Manifests(th) - if err != nil { - th.Errors = append(th.Errors, err) - return - } - - tags, err := manifests.Tags() - if err != nil { - switch err := err.(type) { - case distribution.ErrRepositoryUnknown: - th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()})) - default: - th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - enc := json.NewEncoder(w) - if err := enc.Encode(tagsAPIResponse{ - Name: th.Repository.Name(), - Tags: tags, - }); err != nil { - th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } -} diff --git a/vendor/github.com/docker/distribution/registry/listener/listener.go b/vendor/github.com/docker/distribution/registry/listener/listener.go deleted file mode 100644 index b93a7a63..00000000 --- a/vendor/github.com/docker/distribution/registry/listener/listener.go +++ /dev/null @@ -1,74 +0,0 @@ -package listener - -import ( - "fmt" - "net" - "os" - "time" -) - -// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. It's used by ListenAndServe and ListenAndServeTLS so -// dead TCP connections (e.g. closing laptop mid-download) eventually -// go away. -// it is a plain copy-paste from net/http/server.go -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -// NewListener announces on laddr and net. Accepted values of the net are -// 'unix' and 'tcp' -func NewListener(net, laddr string) (net.Listener, error) { - switch net { - case "unix": - return newUnixListener(laddr) - case "tcp", "": // an empty net means tcp - return newTCPListener(laddr) - default: - return nil, fmt.Errorf("unknown address type %s", net) - } -} - -func newUnixListener(laddr string) (net.Listener, error) { - fi, err := os.Stat(laddr) - if err == nil { - // the file exists. - // try to remove it if it's a socket - if !isSocket(fi.Mode()) { - return nil, fmt.Errorf("file %s exists and is not a socket", laddr) - } - - if err := os.Remove(laddr); err != nil { - return nil, err - } - } else if !os.IsNotExist(err) { - // we can't do stat on the file. 
- // it means we can not remove it - return nil, err - } - - return net.Listen("unix", laddr) -} - -func isSocket(m os.FileMode) bool { - return m&os.ModeSocket != 0 -} - -func newTCPListener(laddr string) (net.Listener, error) { - ln, err := net.Listen("tcp", laddr) - if err != nil { - return nil, err - } - - return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil -} diff --git a/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go b/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go deleted file mode 100644 index 7535c6db..00000000 --- a/vendor/github.com/docker/distribution/registry/middleware/registry/middleware.go +++ /dev/null @@ -1,40 +0,0 @@ -package middleware - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -// InitFunc is the type of a RegistryMiddleware factory function and is -// used to register the constructor for different RegistryMiddleware backends. -type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) - -var middlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a RegistryMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if middlewares == nil { - middlewares = make(map[string]InitFunc) - } - if _, exists := middlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - middlewares[name] = initFunc - - return nil -} - -// Get constructs a RegistryMiddleware with the given options using the named backend. -func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { - if middlewares != nil { - if initFunc, exists := middlewares[name]; exists { - return initFunc(ctx, registry, options) - } - } - - return nil, fmt.Errorf("no registry middleware registered with name: %s", name) -} diff --git a/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go b/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go deleted file mode 100644 index 27b42aec..00000000 --- a/vendor/github.com/docker/distribution/registry/middleware/repository/middleware.go +++ /dev/null @@ -1,40 +0,0 @@ -package middleware - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -// InitFunc is the type of a RepositoryMiddleware factory function and is -// used to register the constructor for different RepositoryMiddleware backends. -type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) - -var middlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a RepositoryMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if middlewares == nil { - middlewares = make(map[string]InitFunc) - } - if _, exists := middlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - middlewares[name] = initFunc - - return nil -} - -// Get constructs a RepositoryMiddleware with the given options using the named backend. 
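The two middleware packages above are the same Register/Get idiom instantiated twice: a package-level map from backend name to constructor, populated at init time and consulted at startup. A generic sketch of the pattern (names and types are illustrative, not the deleted API):

package main

import "fmt"

// initFunc constructs a backend from options, in the spirit of the
// deleted middleware InitFunc types.
type initFunc func(options map[string]interface{}) (interface{}, error)

var registry = map[string]initFunc{}

func register(name string, f initFunc) error {
	if _, exists := registry[name]; exists {
		return fmt.Errorf("name already registered: %s", name)
	}
	registry[name] = f
	return nil
}

func get(name string, options map[string]interface{}) (interface{}, error) {
	if f, ok := registry[name]; ok {
		return f(options)
	}
	return nil, fmt.Errorf("no backend registered with name: %s", name)
}

func main() {
	register("noop", func(_ map[string]interface{}) (interface{}, error) { return struct{}{}, nil })
	v, err := get("noop", nil)
	fmt.Println(v, err)
}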
-func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { - if middlewares != nil { - if initFunc, exists := middlewares[name]; exists { - return initFunc(ctx, repository, options) - } - } - - return nil, fmt.Errorf("no repository middleware registered with name: %s", name) -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go b/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go deleted file mode 100644 index e4bec75a..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyauth.go +++ /dev/null @@ -1,54 +0,0 @@ -package proxy - -import ( - "net/http" - "net/url" - - "github.com/docker/distribution/registry/client/auth" -) - -const tokenURL = "https://auth.docker.io/token" - -type userpass struct { - username string - password string -} - -type credentials struct { - creds map[string]userpass -} - -func (c credentials) Basic(u *url.URL) (string, string) { - up := c.creds[u.String()] - - return up.username, up.password -} - -// ConfigureAuth authorizes with the upstream registry -func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { - if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { - return nil, err - } - - creds := map[string]userpass{ - tokenURL: { - username: username, - password: password, - }, - } - return credentials{creds: creds}, nil -} - -func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error { - resp, err := http.Get(endpoint) - if err != nil { - return err - } - defer resp.Body.Close() - - if err := manager.AddResponse(resp); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go deleted file mode 100644 index b480a111..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore.go +++ /dev/null @@ -1,214 +0,0 @@ -package proxy - -import ( - "io" - "net/http" - "strconv" - "sync" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/proxy/scheduler" -) - -// todo(richardscothern): from cache control header or config file -const blobTTL = time.Duration(24 * 7 * time.Hour) - -type proxyBlobStore struct { - localStore distribution.BlobStore - remoteStore distribution.BlobService - scheduler *scheduler.TTLExpirationScheduler -} - -var _ distribution.BlobStore = proxyBlobStore{} - -type inflightBlob struct { - refCount int - bw distribution.BlobWriter -} - -// inflight tracks currently downloading blobs -var inflight = make(map[digest.Digest]*inflightBlob) - -// mu protects inflight -var mu sync.Mutex - -func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { - w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) - w.Header().Set("Content-Type", mediaType) - w.Header().Set("Docker-Content-Digest", digest.String()) - w.Header().Set("Etag", digest.String()) -} - -func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - desc, err := pbs.localStore.Stat(ctx, dgst) - if err != nil && err != distribution.ErrBlobUnknown { - return err - } - - if err == nil { - proxyMetrics.BlobPush(uint64(desc.Size)) - return 
pbs.localStore.ServeBlob(ctx, w, r, dgst) - } - - desc, err = pbs.remoteStore.Stat(ctx, dgst) - if err != nil { - return err - } - - remoteReader, err := pbs.remoteStore.Open(ctx, dgst) - if err != nil { - return err - } - - bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc) - if err != nil { - return err - } - defer cleanup() - - if isNew { - go func() { - err := streamToStorage(ctx, remoteReader, desc, bw) - if err != nil { - context.GetLogger(ctx).Error(err) - } - - proxyMetrics.BlobPull(uint64(desc.Size)) - }() - err := streamToClient(ctx, w, desc, bw) - if err != nil { - return err - } - - proxyMetrics.BlobPush(uint64(desc.Size)) - pbs.scheduler.AddBlob(dgst.String(), blobTTL) - return nil - } - - err = streamToClient(ctx, w, desc, bw) - if err != nil { - return err - } - proxyMetrics.BlobPush(uint64(desc.Size)) - return nil -} - -type cleanupFunc func() - -// getOrCreateBlobWriter will track which blobs are currently being downloaded and enable client requesting -// the same blob concurrently to read from the existing stream. -func getOrCreateBlobWriter(ctx context.Context, blobs distribution.BlobService, desc distribution.Descriptor) (distribution.BlobWriter, bool, cleanupFunc, error) { - mu.Lock() - defer mu.Unlock() - dgst := desc.Digest - - cleanup := func() { - mu.Lock() - defer mu.Unlock() - inflight[dgst].refCount-- - - if inflight[dgst].refCount == 0 { - defer delete(inflight, dgst) - _, err := inflight[dgst].bw.Commit(ctx, desc) - if err != nil { - // There is a narrow race here where Commit can be called while this blob's TTL is expiring - // and its being removed from storage. In that case, the client stream will continue - // uninterruped and the blob will be pulled through on the next request, so just log it - context.GetLogger(ctx).Errorf("Error committing blob: %q", err) - } - - } - } - - var bw distribution.BlobWriter - _, ok := inflight[dgst] - if ok { - bw = inflight[dgst].bw - inflight[dgst].refCount++ - return bw, false, cleanup, nil - } - - var err error - bw, err = blobs.Create(ctx) - if err != nil { - return nil, false, nil, err - } - - inflight[dgst] = &inflightBlob{refCount: 1, bw: bw} - return bw, true, cleanup, nil -} - -func streamToStorage(ctx context.Context, remoteReader distribution.ReadSeekCloser, desc distribution.Descriptor, bw distribution.BlobWriter) error { - _, err := io.CopyN(bw, remoteReader, desc.Size) - if err != nil { - return err - } - - return nil -} - -func streamToClient(ctx context.Context, w http.ResponseWriter, desc distribution.Descriptor, bw distribution.BlobWriter) error { - setResponseHeaders(w, desc.Size, desc.MediaType, desc.Digest) - - reader, err := bw.Reader() - if err != nil { - return err - } - defer reader.Close() - teeReader := io.TeeReader(reader, w) - buf := make([]byte, 32768, 32786) - var soFar int64 - for { - rd, err := teeReader.Read(buf) - if err == nil || err == io.EOF { - soFar += int64(rd) - if soFar < desc.Size { - // buffer underflow, keep trying - continue - } - return nil - } - return err - } -} - -func (pbs proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - desc, err := pbs.localStore.Stat(ctx, dgst) - if err == nil { - return desc, err - } - - if err != distribution.ErrBlobUnknown { - return distribution.Descriptor{}, err - } - - return pbs.remoteStore.Stat(ctx, dgst) -} - -// Unsupported functions -func (pbs proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - return 
distribution.Descriptor{}, distribution.ErrUnsupported -} - -func (pbs proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - return nil, distribution.ErrUnsupported -} - -func (pbs proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go deleted file mode 100644 index 65d5f922..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package proxy - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -type statsBlobStore struct { - stats map[string]int - blobs distribution.BlobStore -} - -func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - sbs.stats["put"]++ - return sbs.blobs.Put(ctx, mediaType, p) -} - -func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - sbs.stats["get"]++ - return sbs.blobs.Get(ctx, dgst) -} - -func (sbs statsBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { - sbs.stats["create"]++ - return sbs.blobs.Create(ctx) -} - -func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - sbs.stats["resume"]++ - return sbs.blobs.Resume(ctx, id) -} - -func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - sbs.stats["open"]++ - return sbs.blobs.Open(ctx, dgst) -} - -func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - sbs.stats["serveblob"]++ - return sbs.blobs.ServeBlob(ctx, w, r, dgst) -} - -func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - sbs.stats["stat"]++ - return sbs.blobs.Stat(ctx, dgst) -} - -func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - sbs.stats["delete"]++ - return sbs.blobs.Delete(ctx, dgst) -} - -type testEnv struct { - inRemote []distribution.Descriptor - store proxyBlobStore - ctx context.Context -} - -func (te testEnv) LocalStats() *map[string]int { - ls := te.store.localStore.(statsBlobStore).stats - return &ls -} - -func (te testEnv) RemoteStats() *map[string]int { - rs := te.store.remoteStore.(statsBlobStore).stats - return &rs -} - -// Populate remote store and record the digests -func makeTestEnv(t *testing.T, name string) testEnv { - ctx := context.Background() - - localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), 
memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true)
-	localRepo, err := localRegistry.Repository(ctx, name)
-	if err != nil {
-		t.Fatalf("unexpected error getting repo: %v", err)
-	}
-
-	truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false)
-	truthRepo, err := truthRegistry.Repository(ctx, name)
-	if err != nil {
-		t.Fatalf("unexpected error getting repo: %v", err)
-	}
-
-	truthBlobs := statsBlobStore{
-		stats: make(map[string]int),
-		blobs: truthRepo.Blobs(ctx),
-	}
-
-	localBlobs := statsBlobStore{
-		stats: make(map[string]int),
-		blobs: localRepo.Blobs(ctx),
-	}
-
-	s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
-
-	proxyBlobStore := proxyBlobStore{
-		remoteStore: truthBlobs,
-		localStore:  localBlobs,
-		scheduler:   s,
-	}
-
-	te := testEnv{
-		store: proxyBlobStore,
-		ctx:   ctx,
-	}
-	return te
-}
-
-func populate(t *testing.T, te *testEnv, blobCount int) {
-	var inRemote []distribution.Descriptor
-	for i := 0; i < blobCount; i++ {
-		bytes := []byte(fmt.Sprintf("blob%d", i))
-
-		desc, err := te.store.remoteStore.Put(te.ctx, "", bytes)
-		if err != nil {
-			t.Errorf("unexpected error putting blob into remote store: %v", err)
-		}
-		inRemote = append(inRemote, desc)
-	}
-
-	te.inRemote = inRemote
-}
-
-func TestProxyStoreStat(t *testing.T) {
-	te := makeTestEnv(t, "foo/bar")
-	remoteBlobCount := 1
-	populate(t, &te, remoteBlobCount)
-
-	localStats := te.LocalStats()
-	remoteStats := te.RemoteStats()
-
-	// Stat - touches both stores
-	for _, d := range te.inRemote {
-		_, err := te.store.Stat(te.ctx, d.Digest)
-		if err != nil {
-			t.Fatalf("error statting proxy store: %v", err)
-		}
-	}
-
-	if (*localStats)["stat"] != remoteBlobCount {
-		t.Errorf("Unexpected local stat count")
-	}
-
-	if (*remoteStats)["stat"] != remoteBlobCount {
-		t.Errorf("Unexpected remote stat count")
-	}
-}
-
-func TestProxyStoreServe(t *testing.T) {
-	te := makeTestEnv(t, "foo/bar")
-	remoteBlobCount := 1
-	populate(t, &te, remoteBlobCount)
-
-	localStats := te.LocalStats()
-	remoteStats := te.RemoteStats()
-
-	// Serveblob - pulls through blobs
-	for _, dr := range te.inRemote {
-		w := httptest.NewRecorder()
-		r, err := http.NewRequest("GET", "", nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		err = te.store.ServeBlob(te.ctx, w, r, dr.Digest)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		dl, err := digest.FromBytes(w.Body.Bytes())
-		if err != nil {
-			t.Fatalf("error making digest from blob: %v", err)
-		}
-		if dl != dr.Digest {
-			t.Errorf("Mismatching blob fetch from proxy")
-		}
-	}
-
-	if (*localStats)["stat"] != remoteBlobCount || (*localStats)["create"] != remoteBlobCount {
-		t.Fatalf("unexpected local stats")
-	}
-	if (*remoteStats)["stat"] != remoteBlobCount || (*remoteStats)["open"] != remoteBlobCount {
-		t.Fatalf("unexpected remote stats")
-	}
-
-	// Serveblob - blobs come from local
-	for _, dr := range te.inRemote {
-		w := httptest.NewRecorder()
-		r, err := http.NewRequest("GET", "", nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		err = te.store.ServeBlob(te.ctx, w, r, dr.Digest)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		dl, err := digest.FromBytes(w.Body.Bytes())
-		if err != nil {
-			t.Fatalf("error making digest from blob: %v", err)
-		}
-		if dl != dr.Digest {
-			t.Errorf("Mismatching blob fetch from proxy")
-		}
-	}
-
-	// Stat to find local, but no new blobs were created
-	if (*localStats)["stat"] != remoteBlobCount*2 || (*localStats)["create"] != remoteBlobCount {
-		t.Fatalf("unexpected local stats")
-	}
-
-	// Remote unchanged
-	if (*remoteStats)["stat"] != remoteBlobCount || (*remoteStats)["open"] != remoteBlobCount {
-		fmt.Printf("\tlocal=%#v, \n\tremote=%#v\n", localStats, remoteStats)
-		t.Fatalf("unexpected remote stats")
-	}
-}
diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go
deleted file mode 100644
index 5b79c8ce..00000000
--- a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package proxy
-
-import (
-	"time"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest"
-	"github.com/docker/distribution/registry/api/v2"
-	"github.com/docker/distribution/registry/client"
-	"github.com/docker/distribution/registry/proxy/scheduler"
-)
-
-// todo(richardscothern): from cache control header or config
-const repositoryTTL = 24 * 7 * time.Hour
-
-type proxyManifestStore struct {
-	ctx             context.Context
-	localManifests  distribution.ManifestService
-	remoteManifests distribution.ManifestService
-	repositoryName  string
-	scheduler       *scheduler.TTLExpirationScheduler
-}
-
-var _ distribution.ManifestService = &proxyManifestStore{}
-
-func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) {
-	exists, err := pms.localManifests.Exists(dgst)
-	if err != nil {
-		return false, err
-	}
-	if exists {
-		return true, nil
-	}
-
-	return pms.remoteManifests.Exists(dgst)
-}
-
-func (pms proxyManifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
-	sm, err := pms.localManifests.Get(dgst)
-	if err == nil {
-		proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
-		return sm, err
-	}
-
-	sm, err = pms.remoteManifests.Get(dgst)
-	if err != nil {
-		return nil, err
-	}
-
-	proxyMetrics.ManifestPull(uint64(len(sm.Raw)))
-	err = pms.localManifests.Put(sm)
-	if err != nil {
-		return nil, err
-	}
-
-	// Schedule the repo for removal
-	pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL)
-
-	// Ensure the manifest blob is cleaned up
-	pms.scheduler.AddBlob(dgst.String(), repositoryTTL)
-
-	proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
-
-	return sm, err
-}
-
-func (pms proxyManifestStore) Tags() ([]string, error) {
-	return pms.localManifests.Tags()
-}
-
-func (pms proxyManifestStore) ExistsByTag(tag string) (bool, error) {
-	exists, err := pms.localManifests.ExistsByTag(tag)
-	if err != nil {
-		return false, err
-	}
-	if exists {
-		return true, nil
-	}
-
-	return pms.remoteManifests.ExistsByTag(tag)
-}
-
-func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
-	var localDigest digest.Digest
-
-	localManifest, err := pms.localManifests.GetByTag(tag, options...)
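-	// A local miss (ErrManifestUnknown or ErrManifestUnknownRevision) falls
-	// through to the remote via the goto below; any other error aborts the fetch.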
- switch err.(type) { - case distribution.ErrManifestUnknown, distribution.ErrManifestUnknownRevision: - goto fromremote - case nil: - break - default: - return nil, err - } - - localDigest, err = manifestDigest(localManifest) - if err != nil { - return nil, err - } - -fromremote: - var sm *manifest.SignedManifest - sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String())) - if err != nil { - return nil, err - } - - if sm == nil { - context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String()) - return localManifest, nil - } - context.GetLogger(pms.ctx).Debugf("Updated manifest for %q, dgst=%s", tag, localDigest.String()) - - err = pms.localManifests.Put(sm) - if err != nil { - return nil, err - } - - dgst, err := manifestDigest(sm) - if err != nil { - return nil, err - } - pms.scheduler.AddBlob(dgst.String(), repositoryTTL) - pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) - - proxyMetrics.ManifestPull(uint64(len(sm.Raw))) - proxyMetrics.ManifestPush(uint64(len(sm.Raw))) - - return sm, err -} - -func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) { - payload, err := sm.Payload() - if err != nil { - return "", err - - } - - dgst, err := digest.FromBytes(payload) - if err != nil { - return "", err - } - - return dgst, nil -} - -func (pms proxyManifestStore) Put(manifest *manifest.SignedManifest) error { - return v2.ErrorCodeUnsupported -} - -func (pms proxyManifestStore) Delete(dgst digest.Digest) error { - return v2.ErrorCodeUnsupported -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go b/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go deleted file mode 100644 index 7b9b8091..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package proxy - -import ( - "io" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" -) - -type statsManifest struct { - manifests distribution.ManifestService - stats map[string]int -} - -type manifestStoreTestEnv struct { - manifestDigest digest.Digest // digest of the signed manifest in the local storage - manifests proxyManifestStore -} - -func (te manifestStoreTestEnv) LocalStats() *map[string]int { - ls := te.manifests.localManifests.(statsManifest).stats - return &ls -} - -func (te manifestStoreTestEnv) RemoteStats() *map[string]int { - rs := te.manifests.remoteManifests.(statsManifest).stats - return &rs -} - -func (sm statsManifest) Delete(dgst digest.Digest) error { - sm.stats["delete"]++ - return sm.manifests.Delete(dgst) -} - -func (sm statsManifest) Exists(dgst digest.Digest) (bool, error) { - sm.stats["exists"]++ - return sm.manifests.Exists(dgst) -} - -func (sm statsManifest) ExistsByTag(tag string) (bool, error) { - sm.stats["existbytag"]++ - return sm.manifests.ExistsByTag(tag) -} - -func (sm statsManifest) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { - sm.stats["get"]++ - return sm.manifests.Get(dgst) -} - -func (sm statsManifest) 
GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
-	sm.stats["getbytag"]++
-	return sm.manifests.GetByTag(tag, options...)
-}
-
-func (sm statsManifest) Put(manifest *manifest.SignedManifest) error {
-	sm.stats["put"]++
-	return sm.manifests.Put(manifest)
-}
-
-func (sm statsManifest) Tags() ([]string, error) {
-	sm.stats["tags"]++
-	return sm.manifests.Tags()
-}
-
-func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv {
-	ctx := context.Background()
-	truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false)
-	truthRepo, err := truthRegistry.Repository(ctx, name)
-	if err != nil {
-		t.Fatalf("unexpected error getting repo: %v", err)
-	}
-	tr, err := truthRepo.Manifests(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	truthManifests := statsManifest{
-		manifests: tr,
-		stats:     make(map[string]int),
-	}
-
-	manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true)
-	localRepo, err := localRegistry.Repository(ctx, name)
-	if err != nil {
-		t.Fatalf("unexpected error getting repo: %v", err)
-	}
-	lr, err := localRepo.Manifests(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	localManifests := statsManifest{
-		manifests: lr,
-		stats:     make(map[string]int),
-	}
-
-	s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
-	return &manifestStoreTestEnv{
-		manifestDigest: manifestDigest,
-		manifests: proxyManifestStore{
-			ctx:             ctx,
-			localManifests:  localManifests,
-			remoteManifests: truthManifests,
-			scheduler:       s,
-		},
-	}
-}
-
-func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) {
-	m := manifest.Manifest{
-		Versioned: manifest.Versioned{
-			SchemaVersion: 1,
-		},
-		Name: name,
-		Tag:  tag,
-	}
-
-	for i := 0; i < 2; i++ {
-		wr, err := repository.Blobs(ctx).Create(ctx)
-		if err != nil {
-			t.Fatalf("unexpected error creating test upload: %v", err)
-		}
-
-		rs, ts, err := testutil.CreateRandomTarFile()
-		if err != nil {
-			t.Fatalf("unexpected error generating test layer file: %v", err)
-		}
-		dgst := digest.Digest(ts)
-		if _, err := io.Copy(wr, rs); err != nil {
-			t.Fatalf("unexpected error copying to upload: %v", err)
-		}
-
-		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
-			t.Fatalf("unexpected error finishing upload: %v", err)
-		}
-	}
-
-	pk, err := libtrust.GenerateECP256PrivateKey()
-	if err != nil {
-		t.Fatalf("unexpected error generating private key: %v", err)
-	}
-
-	sm, err := manifest.Sign(&m, pk)
-	if err != nil {
-		t.Fatalf("error signing manifest: %v", err)
-	}
-
-	ms, err := repository.Manifests(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = ms.Put(sm)
-	if err != nil {
-		t.Fatalf("unexpected error putting manifest: %v", err)
-	}
-	pl, err := sm.Payload()
-	if err != nil {
-		t.Fatal(err)
-	}
-	return digest.FromBytes(pl)
-}
-
-// TestProxyManifests contains basic acceptance tests
-// for the pull-through behavior
-func TestProxyManifests(t *testing.T) {
-	name := "foo/bar"
-	env := newManifestStoreTestEnv(t, name, "latest")
-
-	localStats := env.LocalStats()
-	remoteStats := env.RemoteStats()
-
-	// Stat - must check local and remote
-	exists, err := env.manifests.ExistsByTag("latest")
-	if err != nil {
t.Fatalf("Error checking existance") - } - if !exists { - t.Errorf("Unexpected non-existant manifest") - } - - if (*localStats)["existbytag"] != 1 && (*remoteStats)["existbytag"] != 1 { - t.Errorf("Unexpected exists count") - } - - // Get - should succeed and pull manifest into local - _, err = env.manifests.Get(env.manifestDigest) - if err != nil { - t.Fatal(err) - } - if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 { - t.Errorf("Unexpected get count") - } - - if (*localStats)["put"] != 1 { - t.Errorf("Expected local put") - } - - // Stat - should only go to local - exists, err = env.manifests.ExistsByTag("latest") - if err != nil { - t.Fatal(err) - } - if !exists { - t.Errorf("Unexpected non-existant manifest") - } - - if (*localStats)["existbytag"] != 2 && (*remoteStats)["existbytag"] != 1 { - t.Errorf("Unexpected exists count") - - } - - // Get - should get from remote, to test freshness - _, err = env.manifests.Get(env.manifestDigest) - if err != nil { - t.Fatal(err) - } - - if (*remoteStats)["get"] != 2 && (*remoteStats)["existsbytag"] != 1 && (*localStats)["put"] != 1 { - t.Errorf("Unexpected get count") - } - -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go b/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go deleted file mode 100644 index d3d84d78..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/proxymetrics.go +++ /dev/null @@ -1,74 +0,0 @@ -package proxy - -import ( - "expvar" - "sync/atomic" -) - -// Metrics is used to hold metric counters -// related to the proxy -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 - BytesPulled uint64 - BytesPushed uint64 -} - -type proxyMetricsCollector struct { - blobMetrics Metrics - manifestMetrics Metrics -} - -// BlobPull tracks metrics about blobs pulled into the cache -func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) { - atomic.AddUint64(&pmc.blobMetrics.Misses, 1) - atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled) -} - -// BlobPush tracks metrics about blobs pushed to clients -func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) { - atomic.AddUint64(&pmc.blobMetrics.Requests, 1) - atomic.AddUint64(&pmc.blobMetrics.Hits, 1) - atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) -} - -// ManifestPull tracks metrics related to Manifests pulled into the cache -func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) { - atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) - atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) -} - -// ManifestPush tracks metrics about manifests pushed to clients -func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { - atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) - atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) - atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) -} - -// proxyMetrics tracks metrics about the proxy cache. This is -// kept globally and made available via expvar. 
-var proxyMetrics = &proxyMetricsCollector{} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - pm := registry.(*expvar.Map).Get("proxy") - if pm == nil { - pm = &expvar.Map{} - pm.(*expvar.Map).Init() - registry.(*expvar.Map).Set("proxy", pm) - } - - pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { - return proxyMetrics.blobMetrics - })) - - pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { - return proxyMetrics.manifestMetrics - })) - -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go b/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go deleted file mode 100644 index e9dec2f7..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/proxyregistry.go +++ /dev/null @@ -1,139 +0,0 @@ -package proxy - -import ( - "net/http" - "net/url" - - "github.com/docker/distribution" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/proxy/scheduler" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/driver" -) - -// proxyingRegistry fetches content from a remote registry and caches it locally -type proxyingRegistry struct { - embedded distribution.Namespace // provides local registry functionality - - scheduler *scheduler.TTLExpirationScheduler - - remoteURL string - credentialStore auth.CredentialStore - challengeManager auth.ChallengeManager -} - -// NewRegistryPullThroughCache creates a registry acting as a pull through cache -func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { - _, err := url.Parse(config.RemoteURL) - if err != nil { - return nil, err - } - - v := storage.NewVacuum(ctx, driver) - - s := scheduler.New(ctx, driver, "/scheduler-state.json") - s.OnBlobExpire(func(digest string) error { - return v.RemoveBlob(digest) - }) - s.OnManifestExpire(func(repoName string) error { - return v.RemoveRepository(repoName) - }) - err = s.Start() - if err != nil { - return nil, err - } - - challengeManager := auth.NewSimpleChallengeManager() - cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) - if err != nil { - return nil, err - } - - return &proxyingRegistry{ - embedded: registry, - scheduler: s, - challengeManager: challengeManager, - credentialStore: cs, - remoteURL: config.RemoteURL, - }, nil -} - -func (pr *proxyingRegistry) Scope() distribution.Scope { - return distribution.GlobalScope -} - -func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { - return pr.embedded.Repositories(ctx, repos, last) -} - -func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) { - tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull"))) - - localRepo, err := pr.embedded.Repository(ctx, name) - if err != nil { - return nil, err - } - localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification) - if err != nil { - return nil, err - } - - 
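-	// The remote side is a registry API client for the same repository name,
-	// reached through the pull-scoped token transport built above; everything
-	// it returns is cached into the embedded local repository.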
remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr) - if err != nil { - return nil, err - } - - remoteManifests, err := remoteRepo.Manifests(ctx) - if err != nil { - return nil, err - } - - return &proxiedRepository{ - blobStore: proxyBlobStore{ - localStore: localRepo.Blobs(ctx), - remoteStore: remoteRepo.Blobs(ctx), - scheduler: pr.scheduler, - }, - manifests: proxyManifestStore{ - repositoryName: name, - localManifests: localManifests, // Options? - remoteManifests: remoteManifests, - ctx: ctx, - scheduler: pr.scheduler, - }, - name: name, - signatures: localRepo.Signatures(), - }, nil -} - -// proxiedRepository uses proxying blob and manifest services to serve content -// locally, or pulling it through from a remote and caching it locally if it doesn't -// already exist -type proxiedRepository struct { - blobStore distribution.BlobStore - manifests distribution.ManifestService - name string - signatures distribution.SignatureService -} - -func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // options - return pr.manifests, nil -} - -func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { - return pr.blobStore -} - -func (pr *proxiedRepository) Name() string { - return pr.name -} - -func (pr *proxiedRepository) Signatures() distribution.SignatureService { - return pr.signatures -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go deleted file mode 100644 index 056b148a..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go +++ /dev/null @@ -1,250 +0,0 @@ -package scheduler - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// onTTLExpiryFunc is called when a repositories' TTL expires -type expiryFunc func(string) error - -const ( - entryTypeBlob = iota - entryTypeManifest -) - -// schedulerEntry represents an entry in the scheduler -// fields are exported for serialization -type schedulerEntry struct { - Key string `json:"Key"` - Expiry time.Time `json:"ExpiryData"` - EntryType int `json:"EntryType"` -} - -// New returns a new instance of the scheduler -func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { - return &TTLExpirationScheduler{ - entries: make(map[string]schedulerEntry), - addChan: make(chan schedulerEntry), - stopChan: make(chan bool), - driver: driver, - pathToStateFile: path, - ctx: ctx, - stopped: true, - } -} - -// TTLExpirationScheduler is a scheduler used to perform actions -// when TTLs expire -type TTLExpirationScheduler struct { - entries map[string]schedulerEntry - addChan chan schedulerEntry - stopChan chan bool - - driver driver.StorageDriver - ctx context.Context - pathToStateFile string - - stopped bool - - onBlobExpire expiryFunc - onManifestExpire expiryFunc -} - -// addChan allows more TTLs to be pushed to the scheduler -type addChan chan schedulerEntry - -// stopChan allows the scheduler to be stopped - used for testing. 
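-// Neither named type is referenced elsewhere; the TTLExpirationScheduler
-// fields above declare the same channel types inline.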
-type stopChan chan bool - -// OnBlobExpire is called when a scheduled blob's TTL expires -func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { - ttles.onBlobExpire = f -} - -// OnManifestExpire is called when a scheduled manifest's TTL expires -func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { - ttles.onManifestExpire = f -} - -// AddBlob schedules a blob cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { - if ttles.stopped { - return fmt.Errorf("scheduler not started") - } - ttles.add(dgst, ttl, entryTypeBlob) - return nil -} - -// AddManifest schedules a manifest cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { - if ttles.stopped { - return fmt.Errorf("scheduler not started") - } - - ttles.add(repoName, ttl, entryTypeManifest) - return nil -} - -// Start starts the scheduler -func (ttles *TTLExpirationScheduler) Start() error { - return ttles.start() -} - -func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { - entry := schedulerEntry{ - Key: key, - Expiry: time.Now().Add(ttl), - EntryType: eType, - } - ttles.addChan <- entry -} - -func (ttles *TTLExpirationScheduler) stop() { - ttles.stopChan <- true -} - -func (ttles *TTLExpirationScheduler) start() error { - err := ttles.readState() - if err != nil { - return err - } - - if !ttles.stopped { - return fmt.Errorf("Scheduler already started") - } - - context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") - ttles.stopped = false - go ttles.mainloop() - - return nil -} - -// mainloop uses a select statement to listen for events. Most of its time -// is spent in waiting on a TTL to expire but can be interrupted when TTLs -// are added. 
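-// The loop also persists the entry map via writeState after every expiry,
-// addition and stop, so pending expiries survive a restart (see readState).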
-func (ttles *TTLExpirationScheduler) mainloop() { - for { - if ttles.stopped { - return - } - - nextEntry, ttl := nextExpiringEntry(ttles.entries) - if len(ttles.entries) == 0 { - context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Nothing to do, sleeping...") - } else { - context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Sleeping for %s until cleanup of %s", ttl, nextEntry.Key) - } - - select { - case <-time.After(ttl): - var f expiryFunc - - switch nextEntry.EntryType { - case entryTypeBlob: - f = ttles.onBlobExpire - case entryTypeManifest: - f = ttles.onManifestExpire - default: - f = func(repoName string) error { - return fmt.Errorf("Unexpected scheduler entry type") - } - } - - if err := f(nextEntry.Key); err != nil { - context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", nextEntry.Key, err) - } - - delete(ttles.entries, nextEntry.Key) - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - case entry := <-ttles.addChan: - context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) - ttles.entries[entry.Key] = entry - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - break - - case <-ttles.stopChan: - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - ttles.stopped = true - } - } -} - -func nextExpiringEntry(entries map[string]schedulerEntry) (*schedulerEntry, time.Duration) { - if len(entries) == 0 { - return nil, 24 * time.Hour - } - - // todo:(richardscothern) this is a primitive o(n) algorithm - // but n will never be *that* big and it's all in memory. 
Investigate - // time.AfterFunc for heap based expiries - - first := true - var nextEntry schedulerEntry - for _, entry := range entries { - if first { - nextEntry = entry - first = false - continue - } - if entry.Expiry.Before(nextEntry.Expiry) { - nextEntry = entry - } - } - - // Dates may be from the past if the scheduler has - // been restarted, set their ttl to 0 - if nextEntry.Expiry.Before(time.Now()) { - nextEntry.Expiry = time.Now() - return &nextEntry, 0 - } - - return &nextEntry, nextEntry.Expiry.Sub(time.Now()) -} - -func (ttles *TTLExpirationScheduler) writeState() error { - jsonBytes, err := json.Marshal(ttles.entries) - if err != nil { - return err - } - - err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) - if err != nil { - return err - } - return nil -} - -func (ttles *TTLExpirationScheduler) readState() error { - if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return nil - default: - return err - } - } - - bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) - if err != nil { - return err - } - - err = json.Unmarshal(bytes, &ttles.entries) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go b/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go deleted file mode 100644 index fb5479f0..00000000 --- a/vendor/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package scheduler - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -func TestSchedule(t *testing.T) { - timeUnit := time.Millisecond - remainingRepos := map[string]bool{ - "testBlob1": true, - "testBlob2": true, - "ch00": true, - } - - s := New(context.Background(), inmemory.New(), "/ttl") - deleteFunc := func(repoName string) error { - if len(remainingRepos) == 0 { - t.Fatalf("Incorrect expiry count") - } - _, ok := remainingRepos[repoName] - if !ok { - t.Fatalf("Trying to remove nonexistant repo: %s", repoName) - } - fmt.Println("removing", repoName) - delete(remainingRepos, repoName) - - return nil - } - s.onBlobExpire = deleteFunc - err := s.start() - if err != nil { - t.Fatalf("Error starting ttlExpirationScheduler: %s", err) - } - - s.add("testBlob1", 3*timeUnit, entryTypeBlob) - s.add("testBlob2", 1*timeUnit, entryTypeBlob) - - func() { - s.add("ch00", 1*timeUnit, entryTypeBlob) - - }() - - // Ensure all repos are deleted - <-time.After(50 * timeUnit) - if len(remainingRepos) != 0 { - t.Fatalf("Repositories remaining: %#v", remainingRepos) - } -} - -func TestRestoreOld(t *testing.T) { - remainingRepos := map[string]bool{ - "testBlob1": true, - "oldRepo": true, - } - - deleteFunc := func(repoName string) error { - if repoName == "oldRepo" && len(remainingRepos) == 3 { - t.Errorf("oldRepo should be removed first") - } - _, ok := remainingRepos[repoName] - if !ok { - t.Fatalf("Trying to remove nonexistant repo: %s", repoName) - } - delete(remainingRepos, repoName) - return nil - } - - timeUnit := time.Millisecond - serialized, err := json.Marshal(&map[string]schedulerEntry{ - "testBlob1": { - Expiry: time.Now().Add(1 * timeUnit), - Key: "testBlob1", - EntryType: 0, - }, - "oldRepo": { - Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first - Key: 
"oldRepo", - EntryType: 0, - }, - }) - if err != nil { - t.Fatalf("Error serializing test data: %s", err.Error()) - } - - ctx := context.Background() - pathToStatFile := "/ttl" - fs := inmemory.New() - err = fs.PutContent(ctx, pathToStatFile, serialized) - if err != nil { - t.Fatal("Unable to write serialized data to fs") - } - s := New(context.Background(), fs, "/ttl") - s.onBlobExpire = deleteFunc - err = s.start() - if err != nil { - t.Fatalf("Error starting ttlExpirationScheduler: %s", err) - } - - <-time.After(50 * timeUnit) - if len(remainingRepos) != 0 { - t.Fatalf("Repositories remaining: %#v", remainingRepos) - } -} - -func TestStopRestore(t *testing.T) { - timeUnit := time.Millisecond - remainingRepos := map[string]bool{ - "testBlob1": true, - "testBlob2": true, - } - deleteFunc := func(repoName string) error { - delete(remainingRepos, repoName) - return nil - } - - fs := inmemory.New() - pathToStateFile := "/ttl" - s := New(context.Background(), fs, pathToStateFile) - s.onBlobExpire = deleteFunc - - err := s.start() - if err != nil { - t.Fatalf(err.Error()) - } - s.add("testBlob1", 300*timeUnit, entryTypeBlob) - s.add("testBlob2", 100*timeUnit, entryTypeBlob) - - // Start and stop before all operations complete - // state will be written to fs - s.stop() - time.Sleep(10 * time.Millisecond) - - // v2 will restore state from fs - s2 := New(context.Background(), fs, pathToStateFile) - s2.onBlobExpire = deleteFunc - err = s2.start() - if err != nil { - t.Fatalf("Error starting v2: %s", err.Error()) - } - - <-time.After(500 * timeUnit) - if len(remainingRepos) != 0 { - t.Fatalf("Repositories remaining: %#v", remainingRepos) - } - -} - -func TestDoubleStart(t *testing.T) { - s := New(context.Background(), inmemory.New(), "/ttl") - err := s.start() - if err != nil { - t.Fatalf("Unable to start scheduler") - } - fmt.Printf("%#v", s) - err = s.start() - if err == nil { - t.Fatalf("Scheduler started twice without error") - } -} diff --git a/vendor/github.com/docker/distribution/cmd/registry/main.go b/vendor/github.com/docker/distribution/registry/registry.go similarity index 64% rename from vendor/github.com/docker/distribution/cmd/registry/main.go rename to vendor/github.com/docker/distribution/registry/registry.go index 9196d316..86cb6a17 100644 --- a/vendor/github.com/docker/distribution/cmd/registry/main.go +++ b/vendor/github.com/docker/distribution/registry/registry.go @@ -1,14 +1,11 @@ -package main +package registry import ( "crypto/tls" "crypto/x509" - _ "expvar" - "flag" "fmt" "io/ioutil" "net/http" - _ "net/http/pprof" "os" "time" @@ -17,76 +14,112 @@ import ( "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" - _ "github.com/docker/distribution/health" - _ "github.com/docker/distribution/registry/auth/htpasswd" - _ "github.com/docker/distribution/registry/auth/silly" - _ "github.com/docker/distribution/registry/auth/token" + "github.com/docker/distribution/health" "github.com/docker/distribution/registry/handlers" "github.com/docker/distribution/registry/listener" - _ "github.com/docker/distribution/registry/proxy" - _ "github.com/docker/distribution/registry/storage/driver/azure" - _ "github.com/docker/distribution/registry/storage/driver/filesystem" - _ "github.com/docker/distribution/registry/storage/driver/inmemory" - _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" - _ "github.com/docker/distribution/registry/storage/driver/oss" - _ 
"github.com/docker/distribution/registry/storage/driver/s3" - _ "github.com/docker/distribution/registry/storage/driver/swift" "github.com/docker/distribution/uuid" "github.com/docker/distribution/version" gorhandlers "github.com/gorilla/handlers" + "github.com/spf13/cobra" "github.com/yvasiyarov/gorelic" ) +// Cmd is a cobra command for running the registry. +var Cmd = &cobra.Command{ + Use: "registry ", + Short: "registry stores and distributes Docker images", + Long: "registry stores and distributes Docker images.", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + + // setup context + ctx := context.WithVersion(context.Background(), version.Version) + + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + if config.HTTP.Debug.Addr != "" { + go func(addr string) { + log.Infof("debug server listening %v", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("error listening on debug interface: %v", err) + } + }(config.HTTP.Debug.Addr) + } + + registry, err := NewRegistry(ctx, config) + if err != nil { + log.Fatalln(err) + } + + if err = registry.ListenAndServe(); err != nil { + log.Fatalln(err) + } + }, +} + var showVersion bool func init() { - flag.BoolVar(&showVersion, "version", false, "show the version and exit") + Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") } -func main() { - flag.Usage = usage - flag.Parse() - - if showVersion { - version.PrintVersion() - return - } - - ctx := context.Background() - ctx = context.WithValue(ctx, "version", version.Version) - - config, err := resolveConfiguration() - if err != nil { - fatalf("configuration error: %v", err) - } +// A Registry represents a complete instance of the registry. +// TODO(aaronl): It might make sense for Registry to become an interface. +type Registry struct { + config *configuration.Configuration + app *handlers.App + server *http.Server +} +// NewRegistry creates a new registry from a context and configuration struct. +func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { + var err error ctx, err = configureLogging(ctx, config) if err != nil { - fatalf("error configuring logger: %v", err) + return nil, fmt.Errorf("error configuring logger: %v", err) } // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. uuid.Loggerf = context.GetLogger(ctx).Warnf - app := handlers.NewApp(ctx, *config) + app := handlers.NewApp(ctx, config) + // TODO(aaronl): The global scope of the health checks means NewRegistry + // can only be called once per process. + app.RegisterHealthChecks() handler := configureReporting(app) + handler = alive("/", handler) + handler = health.Handler(handler) handler = panicHandler(handler) handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) - if config.HTTP.Debug.Addr != "" { - go debugServer(config.HTTP.Debug.Addr) - } - server := &http.Server{ Handler: handler, } + return &Registry{ + app: app, + config: config, + server: server, + }, nil +} + +// ListenAndServe runs the registry's HTTP server. 
+func (registry *Registry) ListenAndServe() error { + config := registry.config + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) if err != nil { - context.GetLogger(app).Fatalln(err) + return err } - defer ln.Close() if config.HTTP.TLS.Certificate != "" { tlsConf := &tls.Config{ @@ -109,7 +142,7 @@ func main() { tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) if err != nil { - context.GetLogger(app).Fatalln(err) + return err } if len(config.HTTP.TLS.ClientCAs) != 0 { @@ -118,16 +151,16 @@ func main() { for _, ca := range config.HTTP.TLS.ClientCAs { caPem, err := ioutil.ReadFile(ca) if err != nil { - context.GetLogger(app).Fatalln(err) + return err } if ok := pool.AppendCertsFromPEM(caPem); !ok { - context.GetLogger(app).Fatalln(fmt.Errorf("Could not add CA to pool")) + return fmt.Errorf("Could not add CA to pool") } } for _, subj := range pool.Subjects() { - context.GetLogger(app).Debugf("CA Subject: %s", string(subj)) + context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert @@ -135,53 +168,12 @@ func main() { } ln = tls.NewListener(ln, tlsConf) - context.GetLogger(app).Infof("listening on %v, tls", ln.Addr()) + context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) } else { - context.GetLogger(app).Infof("listening on %v", ln.Addr()) + context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) } - if err := server.Serve(ln); err != nil { - context.GetLogger(app).Fatalln(err) - } -} - -func usage() { - fmt.Fprintln(os.Stderr, "usage:", os.Args[0], "") - flag.PrintDefaults() -} - -func fatalf(format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, format+"\n", args...) - usage() - os.Exit(1) -} - -func resolveConfiguration() (*configuration.Configuration, error) { - var configurationPath string - - if flag.NArg() > 0 { - configurationPath = flag.Arg(0) - } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { - configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") - } - - if configurationPath == "" { - return nil, fmt.Errorf("configuration path unspecified") - } - - fp, err := os.Open(configurationPath) - if err != nil { - return nil, err - } - - defer fp.Close() - - config, err := configuration.Parse(fp) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) - } - - return config, nil + return registry.server.Serve(ln) } func configureReporting(app *handlers.App) http.Handler { @@ -226,7 +218,7 @@ func configureLogging(ctx context.Context, config *configuration.Configuration) if config.Log.Level == "" && config.Log.Formatter == "" { // If no config for logging is set, fallback to deprecated "Loglevel". log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) + ctx = context.WithLogger(ctx, context.GetLogger(ctx)) return ctx, nil } @@ -261,9 +253,6 @@ func configureLogging(ctx context.Context, config *configuration.Configuration) log.Debugf("using %q logging formatter", config.Log.Formatter) } - // log the application version with messages - ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) - if len(config.Log.Fields) > 0 { // build up the static fields, if present. var fields []interface{} @@ -288,16 +277,6 @@ func logLevel(level configuration.Loglevel) log.Level { return l } -// debugServer starts the debug server with pprof, expvar among other -// endpoints. The addr should not be exposed externally. 
For most of these to
-// work, tls cannot be enabled on the endpoint, so it is generally separate.
-func debugServer(addr string) {
-	log.Infof("debug server listening %v", addr)
-	if err := http.ListenAndServe(addr, nil); err != nil {
-		log.Fatalf("error listening on debug interface: %v", err)
-	}
-}
-
 // panicHandler adds an HTTP handler to the web app. The handler recovers from
 // any panic that occurs. logrus.Panic transmits the panic message to the
 // pre-configured log hooks, which are defined in config.yml.
@@ -311,3 +290,48 @@ func panicHandler(handler http.Handler) http.Handler {
 		handler.ServeHTTP(w, r)
 	})
 }
+
+// alive simply wraps the handler with a route that always returns an http 200
+// response when the path is matched. If the path is not matched, the request
+// is passed to the provided handler. There is no guarantee of anything but
+// that the server is up. Wrap with other handlers (such as health.Handler)
+// for greater effect.
+func alive(path string, handler http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == path {
+			w.Header().Set("Cache-Control", "no-cache")
+			w.WriteHeader(http.StatusOK)
+			return
+		}
+
+		handler.ServeHTTP(w, r)
+	})
+}
+
+func resolveConfiguration(args []string) (*configuration.Configuration, error) {
+	var configurationPath string
+
+	if len(args) > 0 {
+		configurationPath = args[0]
+	} else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" {
+		configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
+	}
+
+	if configurationPath == "" {
+		return nil, fmt.Errorf("configuration path unspecified")
+	}
+
+	fp, err := os.Open(configurationPath)
+	if err != nil {
+		return nil, err
+	}
+
+	defer fp.Close()
+
+	config, err := configuration.Parse(fp)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
+	}
+
+	return config, nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/blob_test.go b/vendor/github.com/docker/distribution/registry/storage/blob_test.go
index e5cfa83e..c84c7432 100644
--- a/vendor/github.com/docker/distribution/registry/storage/blob_test.go
+++ b/vendor/github.com/docker/distribution/registry/storage/blob_test.go
@@ -33,7 +33,10 @@ func TestSimpleBlobUpload(t *testing.T) {
 	ctx := context.Background()
 	imageName := "foo/bar"
 	driver := inmemory.New()
-	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false)
+	registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
+	if err != nil {
+		t.Fatalf("error creating registry: %v", err)
+	}
 	repository, err := registry.Repository(ctx, imageName)
 	if err != nil {
 		t.Fatalf("unexpected error getting repo: %v", err)
@@ -193,7 +196,10 @@
 	}
 
 	// Reuse state to test delete with a delete-disabled registry
-	registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false)
+	registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
+	if err != nil {
+		t.Fatalf("error creating registry: %v", err)
+	}
 	repository, err = registry.Repository(ctx, imageName)
 	if err != nil {
 		t.Fatalf("unexpected error getting repo: %v", err)
@@ -212,7 +218,10 @@ func TestSimpleBlobRead(t *testing.T) {
 	ctx := context.Background()
 	imageName := "foo/bar"
 	driver := inmemory.New()
-	registry :=
NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -316,7 +325,10 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go index 724617f8..f6a8ac43 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobstore.go @@ -13,7 +13,6 @@ import ( // creating and traversing backend links. type blobStore struct { driver driver.StorageDriver - pm *pathMapper statter distribution.BlobStatter } @@ -94,7 +93,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr // path returns the canonical path for the blob identified by digest. The blob // may or may not exist. func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := bs.pm.path(blobDataPathSpec{ + bp, err := pathFor(blobDataPathSpec{ digest: dgst, }) @@ -140,7 +139,6 @@ func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { type blobStatter struct { driver driver.StorageDriver - pm *pathMapper } var _ distribution.BlobDescriptorService = &blobStatter{} @@ -149,9 +147,10 @@ var _ distribution.BlobDescriptorService = &blobStatter{} // in the main blob store. If this method returns successfully, there is // strong guarantee that the blob exists and is available. func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - path, err := bs.pm.path(blobDataPathSpec{ + path, err := pathFor(blobDataPathSpec{ digest: dgst, }) + if err != nil { return distribution.Descriptor{}, err } diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go index 2142c37f..b384fa8a 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go @@ -241,7 +241,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri if !verified { context.GetLoggerWithFields(ctx, - map[string]interface{}{ + map[interface{}]interface{}{ "canonical": canonical, "provided": desc.Digest, }, "canonical", "provided"). @@ -266,7 +266,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // identified by dgst. The layer should be validated before commencing the // move. 
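 // The destination is the canonical blob path derived from the digest (see
 // blobStore.path above), which is shared across repositories.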
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { - blobPath, err := bw.blobStore.pm.path(blobDataPathSpec{ + blobPath, err := pathFor(blobDataPathSpec{ digest: desc.Digest, }) @@ -324,7 +324,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // instance. An error will be returned if the clean up cannot proceed. If the // resources are already not present, no error will be returned. func (bw *blobWriter) removeResources(ctx context.Context) error { - dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{ + dataPath, err := pathFor(uploadDataPathSpec{ name: bw.blobStore.repository.Name(), id: bw.id, }) diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go index a26ac2cc..26d3beab 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -111,12 +111,13 @@ type hashStateEntry struct { // getStoredHashStates returns a slice of hashStateEntries for this upload. func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Name(), id: bw.id, alg: bw.digester.Digest().Algorithm(), list: true, }) + if err != nil { return nil, err } @@ -156,12 +157,13 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { return errResumableDigestNotAvailable } - uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Name(), id: bw.id, alg: bw.digester.Digest().Algorithm(), offset: int64(h.Len()), }) + if err != nil { return err } diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/suite.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go similarity index 93% rename from vendor/github.com/docker/distribution/registry/storage/cache/suite.go rename to vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go index b5a2f643..ed0f95fd 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/suite.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go @@ -1,4 +1,4 @@ -package cache +package cachecheck import ( "testing" @@ -6,19 +6,20 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" ) // CheckBlobDescriptorCache takes a cache implementation through a common set // of operations. If adding new tests, please add them here so new // implementations get the benefit. This should be used for unit tests. 
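 // Typical usage from a provider's test, as the memory and redis tests below
 // do: cachecheck.CheckBlobDescriptorCache(t, provider).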
-func CheckBlobDescriptorCache(t *testing.T, provider BlobDescriptorCacheProvider) { +func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { ctx := context.Background() checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) checkBlobDescriptorCacheSetAndRead(t, ctx, provider) } -func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { +func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { if _, err := provider.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown { t.Fatalf("expected unknown blob error with empty store: %v", err) } @@ -56,7 +57,7 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, } } -func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { +func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:abc") expected := distribution.Descriptor{ Digest: "sha256:abc", @@ -140,7 +141,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi } } -func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { +func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:abc") expected := distribution.Descriptor{ Digest: "sha256:abc", diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go index 120a6572..68a68f08 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" ) @@ -26,7 +26,7 @@ func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider } func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if err := v2.ValidateRepositoryName(repo); err != nil { + if _, err := reference.ParseNamed(repo); err != nil { return nil, err } diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go index 3bae7ccb..49c2b5c3 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory_test.go @@ -3,11 +3,11 @@ package memory import ( "testing" - "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/cachecheck" ) // TestInMemoryBlobInfoCache checks the in memory implementation is working // correctly. 
func TestInMemoryBlobInfoCache(t *testing.T) { - cache.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) + cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) } diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go index 36370bdd..1736756e 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" ) @@ -41,7 +41,7 @@ func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorC // RepositoryScoped returns the scoped cache. func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if err := v2.ValidateRepositoryName(repo); err != nil { + if _, err := reference.ParseNamed(repo); err != nil { return nil, err } diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go index ed6944a1..81bcaddd 100644 --- a/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/cachecheck" "github.com/garyburd/redigo/redis" ) @@ -47,5 +47,5 @@ func TestRedisBlobDescriptorCacheProvider(t *testing.T) { t.Fatalf("unexpected error flushing redis db: %v", err) } - cache.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) + cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) } diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog.go b/vendor/github.com/docker/distribution/registry/storage/catalog.go index 470894b7..b6768012 100644 --- a/vendor/github.com/docker/distribution/registry/storage/catalog.go +++ b/vendor/github.com/docker/distribution/registry/storage/catalog.go @@ -22,7 +22,7 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return 0, errors.New("no space in slice") } - root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + root, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return 0, err } diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go index 1a1dbac5..eb062c5b 100644 --- a/vendor/github.com/docker/distribution/registry/storage/catalog_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/catalog_test.go @@ -22,8 +22,11 @@ func setupFS(t *testing.T) *setupEnv { d := inmemory.New() c := []byte("") ctx := context.Background() - registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) - rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{}) + registry, err := NewRegistry(ctx, d, 
BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + rootpath, _ := pathFor(repositoriesRootPathSpec{}) repos := []string{ "/foo/a/_layers/1", diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go deleted file mode 100644 index cbb95981..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure.go +++ /dev/null @@ -1,366 +0,0 @@ -// Package azure provides a storagedriver.StorageDriver implementation to -// store blobs in Microsoft Azure Blob Storage Service. -package azure - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -const driverName = "azure" - -const ( - paramAccountName = "accountname" - paramAccountKey = "accountkey" - paramContainer = "container" - paramRealm = "realm" -) - -type driver struct { - client azure.BlobStorageClient - container string -} - -type baseEmbed struct{ base.Base } - -// Driver is a storagedriver.StorageDriver implementation backed by -// Microsoft Azure Blob Storage Service. -type Driver struct{ baseEmbed } - -func init() { - factory.Register(driverName, &azureDriverFactory{}) -} - -type azureDriverFactory struct{} - -func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -// FromParameters constructs a new Driver with a given parameters map. -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - accountName, ok := parameters[paramAccountName] - if !ok || fmt.Sprint(accountName) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountName) - } - - accountKey, ok := parameters[paramAccountKey] - if !ok || fmt.Sprint(accountKey) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) - } - - container, ok := parameters[paramContainer] - if !ok || fmt.Sprint(container) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramContainer) - } - - realm, ok := parameters[paramRealm] - if !ok || fmt.Sprint(realm) == "" { - realm = azure.DefaultBaseURL - } - - return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) -} - -// New constructs a new Driver with the given Azure Storage Account credentials -func New(accountName, accountKey, container, realm string) (*Driver, error) { - api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) - if err != nil { - return nil, err - } - - blobClient := api.GetBlobService() - - // Create registry container - if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { - return nil, err - } - - d := &driver{ - client: blobClient, - container: container} - return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil -} - -// Implement the storagedriver.StorageDriver interface. -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. 
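-// The blob is read fully into memory via ioutil.ReadAll; large objects should
-// be streamed with ReadStream instead.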
-func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - blob, err := d.client.GetBlob(d.container, path) - if err != nil { - if is404(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - return ioutil.ReadAll(blob) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { - return err - } - if err := d.client.CreateBlockBlob(d.container, path); err != nil { - return err - } - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) - return err -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - if ok, err := d.client.BlobExists(d.container, path); err != nil { - return nil, err - } else if !ok { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - info, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - size := int64(info.ContentLength) - if offset >= size { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - bytesRange := fmt.Sprintf("%v-", offset) - resp, err := d.client.GetBlobRange(d.container, path, bytesRange) - if err != nil { - return nil, err - } - return resp, nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { - if blobExists, err := d.client.BlobExists(d.container, path); err != nil { - return 0, err - } else if !blobExists { - err := d.client.CreateBlockBlob(d.container, path) - if err != nil { - return 0, err - } - } - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - zw := newZeroFillWriter(&bw) - return zw.Write(d.container, path, offset, reader) -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. 
-func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
-	// Check if the path is a blob
-	if ok, err := d.client.BlobExists(d.container, path); err != nil {
-		return nil, err
-	} else if ok {
-		blob, err := d.client.GetBlobProperties(d.container, path)
-		if err != nil {
-			return nil, err
-		}
-
-		mtim, err := time.Parse(http.TimeFormat, blob.LastModified)
-		if err != nil {
-			return nil, err
-		}
-
-		return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
-			Path:    path,
-			Size:    int64(blob.ContentLength),
-			ModTime: mtim,
-			IsDir:   false,
-		}}, nil
-	}
-
-	// Check if path is a virtual container
-	virtContainerPath := path
-	if !strings.HasSuffix(virtContainerPath, "/") {
-		virtContainerPath += "/"
-	}
-	blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
-		Prefix:     virtContainerPath,
-		MaxResults: 1,
-	})
-	if err != nil {
-		return nil, err
-	}
-	if len(blobs.Blobs) > 0 {
-		// path is a virtual container
-		return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
-			Path:  path,
-			IsDir: true,
-		}}, nil
-	}
-
-	// path is not a blob or virtual container
-	return nil, storagedriver.PathNotFoundError{Path: path}
-}
-
-// List returns a list of the objects that are direct descendants of the given
-// path.
-func (d *driver) List(ctx context.Context, path string) ([]string, error) {
-	if path == "/" {
-		path = ""
-	}
-
-	blobs, err := d.listBlobs(d.container, path)
-	if err != nil {
-		return blobs, err
-	}
-
-	list := directDescendants(blobs, path)
-	return list, nil
-}
-
-// Move moves an object stored at sourcePath to destPath, removing the original
-// object.
-func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
-	sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath)
-	err := d.client.CopyBlob(d.container, destPath, sourceBlobURL)
-	if err != nil {
-		if is404(err) {
-			return storagedriver.PathNotFoundError{Path: sourcePath}
-		}
-		return err
-	}
-
-	return d.client.DeleteBlob(d.container, sourcePath)
-}
-
-// Delete recursively deletes all objects stored at "path" and its subpaths.
-func (d *driver) Delete(ctx context.Context, path string) error {
-	ok, err := d.client.DeleteBlobIfExists(d.container, path)
-	if err != nil {
-		return err
-	}
-	if ok {
-		return nil // was a blob and deleted, return
-	}
-
-	// Not a blob, see if path is a virtual container with blobs
-	blobs, err := d.listBlobs(d.container, path)
-	if err != nil {
-		return err
-	}
-
-	for _, b := range blobs {
-		if err = d.client.DeleteBlob(d.container, b); err != nil {
-			return err
-		}
-	}
-
-	if len(blobs) == 0 {
-		return storagedriver.PathNotFoundError{Path: path}
-	}
-	return nil
-}
-
-// URLFor returns a publicly accessible URL for the blob stored at the given
-// path for the specified duration, making use of Azure Storage Shared Access
-// Signatures (SAS). See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
-// for more info.
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-	expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration
-	expires, ok := options["expiry"]
-	if ok {
-		t, ok := expires.(time.Time)
-		if ok {
-			expiresTime = t
-		}
-	}
-	return d.client.GetBlobSASURI(d.container, path, expiresTime, "r")
-}
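For illustration, a minimal sketch of how a caller might request a signed URL from the driver above. The blob path and expiry value are hypothetical; the "expiry" option key and its time.Time type match the URLFor implementation:

	// Sketch only: d is a configured *Driver from this package and ctx a
	// context from github.com/docker/distribution/context.
	opts := map[string]interface{}{
		"expiry": time.Now().UTC().Add(30 * time.Minute), // overrides the 20-minute default
	}
	signedURL, err := d.URLFor(ctx, "/docker/registry/v2/blobs/data", opts)
	if err != nil {
		return err
	}
	// signedURL embeds a read-only SAS token; clients can GET it without credentials.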
-// directDescendants finds the direct descendants (blobs or virtual containers)
-// of prefix in the given list of blob paths and returns their full paths.
-// Elements in the blobs list must be prefixed with a "/".
-//
-// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is
-// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"}
-func directDescendants(blobs []string, prefix string) []string {
-	if !strings.HasPrefix(prefix, "/") { // add leading '/'
-		prefix = "/" + prefix
-	}
-	if !strings.HasSuffix(prefix, "/") { // containerify the path
-		prefix += "/"
-	}
-
-	out := make(map[string]bool)
-	for _, b := range blobs {
-		if strings.HasPrefix(b, prefix) {
-			rel := b[len(prefix):]
-			c := strings.Count(rel, "/")
-			if c == 0 {
-				out[b] = true
-			} else {
-				out[prefix+rel[:strings.Index(rel, "/")]] = true
-			}
-		}
-	}
-
-	var keys []string
-	for k := range out {
-		keys = append(keys, k)
-	}
-	return keys
-}
-
-func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
-	if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path
-		virtPath += "/"
-	}
-
-	out := []string{}
-	marker := ""
-	for {
-		resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
-			Marker: marker,
-			Prefix: virtPath,
-		})
-
-		if err != nil {
-			return out, err
-		}
-
-		for _, b := range resp.Blobs {
-			out = append(out, b.Name)
-		}
-
-		if len(resp.Blobs) == 0 || resp.NextMarker == "" {
-			break
-		}
-		marker = resp.NextMarker
-	}
-	return out, nil
-}
-
-func is404(err error) bool {
-	e, ok := err.(azure.AzureStorageServiceError)
-	return ok && e.StatusCode == http.StatusNotFound
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go
deleted file mode 100644
index 4a0661b3..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/azure_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package azure
-
-import (
-	"fmt"
-	"os"
-	"strings"
-	"testing"
-
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/distribution/registry/storage/driver/testsuites"
-	. "gopkg.in/check.v1"
-)
-
-const (
-	envAccountName = "AZURE_STORAGE_ACCOUNT_NAME"
-	envAccountKey  = "AZURE_STORAGE_ACCOUNT_KEY"
-	envContainer   = "AZURE_STORAGE_CONTAINER"
-	envRealm       = "AZURE_STORAGE_REALM"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { TestingT(t) } - -func init() { - var ( - accountName string - accountKey string - container string - realm string - ) - - config := []struct { - env string - value *string - }{ - {envAccountName, &accountName}, - {envAccountKey, &accountKey}, - {envContainer, &container}, - {envRealm, &realm}, - } - - missing := []string{} - for _, v := range config { - *v.value = os.Getenv(v.env) - if *v.value == "" { - missing = append(missing, v.env) - } - } - - azureDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(accountName, accountKey, container, realm) - } - - // Skip Azure storage driver tests if environment variable parameters are not provided - skipCheck := func() string { - if len(missing) > 0 { - return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) - } - return "" - } - - testsuites.RegisterSuite(azureDriverConstructor, skipCheck) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go deleted file mode 100644 index 1c1df899..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go +++ /dev/null @@ -1,24 +0,0 @@ -package azure - -import ( - "fmt" - "io" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -// azureBlockStorage is adaptor between azure.BlobStorageClient and -// blockStorage interface. -type azureBlockStorage struct { - azure.BlobStorageClient -} - -func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) -} - -func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { - a := azureBlockStorage{} - a.BlobStorageClient = b - return a -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go deleted file mode 100644 index 7ce47195..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package azure - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -type StorageSimulator struct { - blobs map[string]*BlockBlob -} - -type BlockBlob struct { - blocks map[string]*DataBlock - blockList []string -} - -type DataBlock struct { - data []byte - committed bool -} - -func (s *StorageSimulator) path(container, blob string) string { - return fmt.Sprintf("%s/%s", container, blob) -} - -func (s *StorageSimulator) BlobExists(container, blob string) (bool, error) { - _, ok := s.blobs[s.path(container, blob)] - return ok, nil -} - -func (s *StorageSimulator) GetBlob(container, blob string) (io.ReadCloser, error) { - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return nil, fmt.Errorf("blob not found") - } - - var readers []io.Reader - for _, bID := range bb.blockList { - readers = append(readers, bytes.NewReader(bb.blocks[bID].data)) - } - return ioutil.NopCloser(io.MultiReader(readers...)), nil -} - -func (s *StorageSimulator) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - r, err := s.GetBlob(container, blob) - if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - return 
ioutil.NopCloser(bytes.NewReader(b[start : start+length])), nil
-}
-
-func (s *StorageSimulator) CreateBlockBlob(container, blob string) error {
-	path := s.path(container, blob)
-	bb := &BlockBlob{
-		blocks:    make(map[string]*DataBlock),
-		blockList: []string{},
-	}
-	s.blobs[path] = bb
-	return nil
-}
-
-func (s *StorageSimulator) PutBlock(container, blob, blockID string, chunk []byte) error {
-	path := s.path(container, blob)
-	bb, ok := s.blobs[path]
-	if !ok {
-		return fmt.Errorf("blob not found")
-	}
-	data := make([]byte, len(chunk))
-	copy(data, chunk)
-	bb.blocks[blockID] = &DataBlock{data: data, committed: false} // add block to blob
-	return nil
-}
-
-func (s *StorageSimulator) GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) {
-	resp := azure.BlockListResponse{}
-	bb, ok := s.blobs[s.path(container, blob)]
-	if !ok {
-		return resp, fmt.Errorf("blob not found")
-	}
-
-	// Iterate committed blocks (in order)
-	if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted {
-		for _, blockID := range bb.blockList {
-			b := bb.blocks[blockID]
-			block := azure.BlockResponse{
-				Name: blockID,
-				Size: int64(len(b.data)),
-			}
-			resp.CommittedBlocks = append(resp.CommittedBlocks, block)
-		}
-	}
-
-	// Iterate uncommitted blocks (in no order)
-	if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeUncommitted {
-		for blockID, b := range bb.blocks {
-			block := azure.BlockResponse{
-				Name: blockID,
-				Size: int64(len(b.data)),
-			}
-			if !b.committed {
-				resp.UncommittedBlocks = append(resp.UncommittedBlocks, block)
-			}
-		}
-	}
-	return resp, nil
-}
-
-func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.Block) error {
-	bb, ok := s.blobs[s.path(container, blob)]
-	if !ok {
-		return fmt.Errorf("blob not found")
-	}
-
-	var blockIDs []string
-	for _, v := range blocks {
-		bl, ok := bb.blocks[v.ID]
-		if !ok { // check if block ID exists
-			return fmt.Errorf("Block id '%s' not found", v.ID)
-		}
-		bl.committed = true
-		blockIDs = append(blockIDs, v.ID)
-	}
-
-	// Mark all other blocks uncommitted
-	for k, b := range bb.blocks {
-		inList := false
-		for _, v := range blockIDs {
-			if k == v {
-				inList = true
-				break
-			}
-		}
-		if !inList {
-			b.committed = false
-		}
-	}
-
-	bb.blockList = blockIDs
-	return nil
-}
-
-func NewStorageSimulator() StorageSimulator {
-	return StorageSimulator{
-		blobs: make(map[string]*BlockBlob),
-	}
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid.go
deleted file mode 100644
index 776c7cd5..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package azure
-
-import (
-	"encoding/base64"
-	"fmt"
-	"math/rand"
-	"sync"
-	"time"
-
-	azure "github.com/Azure/azure-sdk-for-go/storage"
-)
-
-type blockIDGenerator struct {
-	pool map[string]bool
-	r    *rand.Rand
-	m    sync.Mutex
-}
-
-// Generate returns an unused random block ID and adds the generated ID
-// to the list of used IDs so that the same block name is not used again.
-func (b *blockIDGenerator) Generate() string { - b.m.Lock() - defer b.m.Unlock() - - var id string - for { - id = toBlockID(int(b.r.Int())) - if !b.exists(id) { - break - } - } - b.pool[id] = true - return id -} - -func (b *blockIDGenerator) exists(id string) bool { - _, used := b.pool[id] - return used -} - -func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { - b.m.Lock() - defer b.m.Unlock() - - for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) { - b.pool[bl.Name] = true - } -} - -func newBlockIDGenerator() *blockIDGenerator { - return &blockIDGenerator{ - pool: make(map[string]bool), - r: rand.New(rand.NewSource(time.Now().UnixNano()))} -} - -// toBlockId converts given integer to base64-encoded block ID of a fixed length. -func toBlockID(i int) string { - s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs - return base64.StdEncoding.EncodeToString([]byte(s)) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go deleted file mode 100644 index aab70202..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package azure - -import ( - "math" - "testing" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -func Test_blockIdGenerator(t *testing.T) { - r := newBlockIDGenerator() - - for i := 1; i <= 10; i++ { - if expected := i - 1; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - if id := r.Generate(); id == "" { - t.Fatal("returned empty id") - } - if expected := i; len(r.pool) != expected { - t.Fatalf("rand pool has wrong number of items: %d, expected:%d", len(r.pool), expected) - } - } -} - -func Test_blockIdGenerator_Feed(t *testing.T) { - r := newBlockIDGenerator() - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed empty list - blocks := azure.BlockListResponse{} - r.Feed(blocks) - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed blocks - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - {"2", 2}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed same block IDs with committed/uncommitted place changed - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } -} - -func Test_toBlockId(t *testing.T) { - min := 0 - max := math.MaxInt64 - - if len(toBlockID(min)) != len(toBlockID(max)) { - t.Fatalf("different-sized blockIDs are returned") - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go deleted file mode 100644 index f18692d0..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go +++ 
/dev/null
@@ -1,208 +0,0 @@
-package azure
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-
-	azure "github.com/Azure/azure-sdk-for-go/storage"
-)
-
-// blockStorage is the interface required from a block storage service
-// client implementation
-type blockStorage interface {
-	CreateBlockBlob(container, blob string) error
-	GetBlob(container, blob string) (io.ReadCloser, error)
-	GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error)
-	PutBlock(container, blob, blockID string, chunk []byte) error
-	GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error)
-	PutBlockList(container, blob string, blocks []azure.Block) error
-}
-
-// randomBlobWriter enables random-access semantics on Azure block blobs by
-// allowing chunks of arbitrary length to be written at arbitrary offsets
-// within the blob. Normally, Azure Blob Storage does not support random
-// access semantics on block blobs; however, this writer can download, split
-// and re-upload the overlapping blocks, discarding those that are overwritten
-// entirely.
-type randomBlobWriter struct {
-	bs        blockStorage
-	blockSize int
-}
-
-func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter {
-	return randomBlobWriter{bs: bs, blockSize: blockSize}
-}
-
-// WriteBlobAt writes the given chunk to the specified position of an existing
-// blob. The offset must be less than or equal to the size of the blob.
-func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) {
-	rand := newBlockIDGenerator()
-
-	blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted)
-	if err != nil {
-		return 0, err
-	}
-	rand.Feed(blocks) // load existing block IDs
-
-	// Check the write offset against the existing blob
-	size := getBlobSize(blocks)
-	if offset < 0 || offset > size {
-		return 0, fmt.Errorf("wrong offset for Write: %v", offset)
-	}
-
-	// Upload the new chunk as blocks
-	blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand)
-	if err != nil {
-		return 0, err
-	}
-
-	// For non-append operations, existing blocks may need to be split
-	if offset != size {
-		// Split the block on the left end (if any)
-		leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand)
-		if err != nil {
-			return 0, err
-		}
-		blockList = append(leftBlocks, blockList...)
-
-		// Split the block on the right end (if any)
-		rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand)
-		if err != nil {
-			return 0, err
-		}
-		blockList = append(blockList, rightBlocks...)
-	} else {
-		// Use existing block list
-		var existingBlocks []azure.Block
-		for _, v := range blocks.CommittedBlocks {
-			existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
-		}
-		blockList = append(existingBlocks, blockList...)
-	}
-	// Put block list
-	return nn, r.bs.PutBlockList(container, blob, blockList)
-}
-
-func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) {
-	blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted)
-	if err != nil {
-		return 0, err
-	}
-	return getBlobSize(blocks), nil
-}
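As a reminder of how these pieces compose, the azure driver's WriteStream (removed earlier in this patch) chains the block-storage adaptor, the random-access writer, and the zero-fill writer. A sketch of that removed call path, not new API:

	// From the deleted azure.go WriteStream:
	bs := newAzureBlockStorage(d.client)                   // adapt the SDK client to blockStorage
	bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) // random-offset writes via block splitting
	zw := newZeroFillWriter(&bw)                           // fill any gap past the current size with zeros
	nn, err := zw.Write(d.container, path, offset, reader)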
-// writeChunkToBlocks writes the given chunk to one or more blocks within the
-// specified blob and returns their block representations. Those blocks are
-// not committed yet.
-func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) {
-	var newBlocks []azure.Block
-	var nn int64
-
-	// Read chunks of at most size N except the last chunk to
-	// maximize block size and minimize block count.
-	buf := make([]byte, r.blockSize)
-	for {
-		n, err := io.ReadFull(chunk, buf)
-		if err == io.EOF {
-			break
-		}
-		nn += int64(n)
-		data := buf[:n]
-		blockID := rand.Generate()
-		if err := r.bs.PutBlock(container, blob, blockID, data); err != nil {
-			return newBlocks, nn, err
-		}
-		newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted})
-	}
-	return newBlocks, nn, nil
-}
-
-// blocksLeftSide returns the blocks that are going to be at the left side of
-// the writeOffset: [0, writeOffset) by identifying blocks that will remain
-// the same and splitting blocks and reuploading them as needed.
-func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) {
-	var left []azure.Block
-	bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll)
-	if err != nil {
-		return left, err
-	}
-
-	o := writeOffset
-	elapsed := int64(0)
-	for _, v := range bx.CommittedBlocks {
-		blkSize := int64(v.Size)
-		if o >= blkSize { // use existing block
-			left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
-			o -= blkSize
-			elapsed += blkSize
-		} else if o > 0 { // current block needs to be split
-			start := elapsed
-			size := o
-			part, err := r.bs.GetSectionReader(container, blob, start, size)
-			if err != nil {
-				return left, err
-			}
-			newBlockID := rand.Generate()
-
-			data, err := ioutil.ReadAll(part)
-			if err != nil {
-				return left, err
-			}
-			if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
-				return left, err
-			}
-			left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
-			break
-		}
-	}
-	return left, nil
-}
-
-// blocksRightSide returns the blocks that are going to be at the right side of
-// the written chunk: [writeOffset+size, +inf) by identifying blocks that will
-// remain the same and splitting blocks and reuploading them as needed.
-func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) {
-	var right []azure.Block
-
-	bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll)
-	if err != nil {
-		return nil, err
-	}
-
-	re := writeOffset + chunkSize - 1 // right end of written chunk
-	var elapsed int64
-	for _, v := range bx.CommittedBlocks {
-		var (
-			bs = elapsed                     // left end of current block
-			be = elapsed + int64(v.Size) - 1 // right end of current block
-		)
-
-		if bs > re { // take the block as is
-			right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
-		} else if be > re { // current block needs to be split
-			part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1)
-			if err != nil {
-				return right, err
-			}
-			newBlockID := rand.Generate()
-
-			data, err := ioutil.ReadAll(part)
-			if err != nil {
-				return right, err
-			}
-			if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
-				return right, err
-			}
-			right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
-		}
-		elapsed += int64(v.Size)
-	}
-	return right, nil
-}
-
-func getBlobSize(blocks azure.BlockListResponse) int64 {
-	var n int64
-	for _, v := range blocks.CommittedBlocks {
-		n += int64(v.Size)
-	}
-	return n
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go
deleted file mode 100644
index 32c2509e..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package azure
-
-import (
-	"bytes"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"reflect"
-	"strings"
-	"testing"
-
-	azure "github.com/Azure/azure-sdk-for-go/storage"
-)
-
-func TestRandomWriter_writeChunkToBlocks(t *testing.T) {
-	s := NewStorageSimulator()
-	rw := newRandomBlobWriter(&s, 3)
-	rand := newBlockIDGenerator()
-	c := []byte("AAABBBCCCD")
-
-	if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	}
-	bw, nn, err := rw.writeChunkToBlocks("a", "b", bytes.NewReader(c), rand)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if expected := int64(len(c)); nn != expected {
-		t.Fatalf("wrong nn:%v, expected:%v", nn, expected)
-	}
-	if expected := 4; len(bw) != expected {
-		t.Fatal("unexpected written block count")
-	}
-
-	bx, err := s.GetBlockList("a", "b", azure.BlockListTypeAll)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if expected := 0; len(bx.CommittedBlocks) != expected {
-		t.Fatal("unexpected committed block count")
-	}
-	if expected := 4; len(bx.UncommittedBlocks) != expected {
-		t.Fatalf("unexpected uncommitted block count: %d -- %#v", len(bx.UncommittedBlocks), bx)
-	}
-
-	if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
-		t.Fatal(err)
-	}
-
-	r, err := rw.bs.GetBlob("a", "b")
-	if err != nil {
-		t.Fatal(err)
-	}
-	assertBlobContents(t, r, c)
-}
-
-func TestRandomWriter_blocksLeftSide(t *testing.T) {
-	blob := "AAAAABBBBBCCC"
-	cases := []struct {
-		offset          int64
-		expectedBlob    string
-		expectedPattern []azure.BlockStatus
-	}{
-		{0, "", []azure.BlockStatus{}}, // write to beginning, discard all
-		{13, blob, []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to end, no change
-		{1, "A", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // write at 1
-		{5, "AAAAA", []azure.BlockStatus{azure.BlockStatusCommitted}}, // write just after first block
-		{6, "AAAAAB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block
-		{9, "AAAAABBBB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block near its end
-	}
-
-	for _, c := range cases {
-		s := NewStorageSimulator()
-		rw := newRandomBlobWriter(&s, 5)
-		rand := newBlockIDGenerator()
-
-		if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
-			t.Fatal(err)
-		}
-		bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
-			t.Fatal(err)
-		}
-		bx, err := rw.blocksLeftSide("a", "b", c.offset, rand)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		bs := []azure.BlockStatus{}
-		for _, v := range bx {
-			bs = append(bs, v.Status)
-		}
-
-		if !reflect.DeepEqual(bs, c.expectedPattern) {
-			t.Logf("Committed blocks %v", bw)
-			t.Fatalf("For offset %v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.expectedPattern, bs, bx)
-		}
-		if err := rw.bs.PutBlockList("a", "b", bx); err != nil {
-			t.Fatal(err)
-		}
-		r, err := rw.bs.GetBlob("a", "b")
-		if err != nil {
-			t.Fatal(err)
-		}
-		cout, err := ioutil.ReadAll(r)
-		if err != nil {
-			t.Fatal(err)
-		}
-		outBlob := string(cout)
-		if outBlob != c.expectedBlob {
-			t.Fatalf("wrong blob contents: %v, expected: %v", outBlob, c.expectedBlob)
-		}
-	}
-}
-
-func TestRandomWriter_blocksRightSide(t *testing.T) {
-	blob := "AAAAABBBBBCCC"
-	cases := []struct {
-		offset          int64
-		size            int64
-		expectedBlob    string
-		expectedPattern []azure.BlockStatus
-	}{
-		{0, 100, "", []azure.BlockStatus{}}, // overwrite the entire blob
-		{0, 3, "AABBBBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // split first block
-		{4, 1, "BBBBBCCC", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to last char of first block
-		{1, 6, "BBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted}}, // overwrite splits first and second block, last block remains
-		{3, 8, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite a block in middle block, split end block
-		{10, 1, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite first byte of rightmost block
-		{11, 2, "", []azure.BlockStatus{}}, // overwrite the rightmost index
-		{13, 20, "", []azure.BlockStatus{}}, // append to the end
-	}
-
-	for _, c := range cases {
-		s := NewStorageSimulator()
-		rw := newRandomBlobWriter(&s, 5)
-		rand := newBlockIDGenerator()
-
-		if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
-			t.Fatal(err)
-		}
-		bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
-			t.Fatal(err)
-		}
-		bx, err := rw.blocksRightSide("a", "b", c.offset, c.size, rand)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		bs := []azure.BlockStatus{}
-		for _, v := range bx {
-			bs = append(bs, v.Status)
-		}
-
-		if !reflect.DeepEqual(bs, c.expectedPattern) {
-			t.Logf("Committed blocks %v", bw)
-			t.Fatalf("For offset %v-size:%v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.size, c.expectedPattern, bs, bx)
-		}
-		if err := rw.bs.PutBlockList("a", "b", bx); err != nil {
-			t.Fatal(err)
-		}
-		r, err := rw.bs.GetBlob("a", "b")
-		if err != nil {
-			t.Fatal(err)
-		}
-		cout, err := ioutil.ReadAll(r)
- if err != nil { - t.Fatal(err) - } - outBlob := string(cout) - if outBlob != c.expectedBlob { - t.Fatalf("For offset %v-size:%v: wrong blob contents: %v, expected: %v", c.offset, c.size, outBlob, c.expectedBlob) - } - } -} - -func TestRandomWriter_Write_NewBlob(t *testing.T) { - var ( - s = NewStorageSimulator() - rw = newRandomBlobWriter(&s, 1024*3) // 3 KB blocks - blob = randomContents(1024 * 7) // 7 KB blob - ) - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - if _, err := rw.WriteBlobAt("a", "b", 10, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if _, err := rw.WriteBlobAt("a", "b", 100000, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(blob)); err != nil { - t.Fatal(err) - } else if expected := int64(len(blob)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if len(bx.CommittedBlocks) != 3 { - t.Fatalf("got wrong number of committed blocks: %v", len(bx.CommittedBlocks)) - } - - // Replace first 512 bytes - leftChunk := randomContents(512) - blob = append(leftChunk, blob[512:]...) - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(leftChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(leftChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 4; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace last 512 bytes with 1024 bytes - rightChunk := randomContents(1024) - offset := int64(len(blob) - 512) - blob = append(blob[:offset], rightChunk...) - if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(rightChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(rightChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 5; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace 2K-4K (overlaps 2 blocks from L/R) - newChunk := randomContents(1024 * 2) - offset = 1024 * 2 - blob = append(append(blob[:offset], newChunk...), blob[offset+int64(len(newChunk)):]...) 
-	if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(newChunk)); err != nil {
-		t.Fatal(err)
-	} else if expected := int64(len(newChunk)); expected != nn {
-		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
-	}
-	if out, err := rw.bs.GetBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	} else {
-		assertBlobContents(t, out, blob)
-	}
-	if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil {
-		t.Fatal(err)
-	} else if expected := 6; len(bx.CommittedBlocks) != expected {
-		t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks)
-	}
-
-	// Replace the entire blob
-	newBlob := randomContents(1024 * 30)
-	if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(newBlob)); err != nil {
-		t.Fatal(err)
-	} else if expected := int64(len(newBlob)); expected != nn {
-		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
-	}
-	if out, err := rw.bs.GetBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	} else {
-		assertBlobContents(t, out, newBlob)
-	}
-	if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil {
-		t.Fatal(err)
-	} else if expected := 10; len(bx.CommittedBlocks) != expected {
-		t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks)
-	} else if expected, size := int64(1024*30), getBlobSize(bx); size != expected {
-		t.Fatalf("committed block size does not indicate blob size")
-	}
-}
-
-func Test_getBlobSize(t *testing.T) {
-	// with some committed blocks
-	if expected, size := int64(151), getBlobSize(azure.BlockListResponse{
-		CommittedBlocks: []azure.BlockResponse{
-			{"A", 100},
-			{"B", 50},
-			{"C", 1},
-		},
-		UncommittedBlocks: []azure.BlockResponse{
-			{"D", 200},
-		}}); expected != size {
-		t.Fatalf("wrong blob size: %v, expected: %v", size, expected)
-	}
-
-	// with no committed blocks
-	if expected, size := int64(0), getBlobSize(azure.BlockListResponse{
-		UncommittedBlocks: []azure.BlockResponse{
-			{"A", 100},
-			{"B", 50},
-			{"C", 1},
-			{"D", 200},
-		}}); expected != size {
-		t.Fatalf("wrong blob size: %v, expected: %v", size, expected)
-	}
-}
-
-func assertBlobContents(t *testing.T, r io.Reader, expected []byte) {
-	out, err := ioutil.ReadAll(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if !reflect.DeepEqual(out, expected) {
-		t.Fatalf("wrong blob contents. size: %v, expected: %v", len(out), len(expected))
-	}
-}
-
-func randomContents(length int64) []byte {
-	b := make([]byte, length)
-	for i := range b {
-		b[i] = byte(rand.Intn(2 << 8))
-	}
-	return b
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go
deleted file mode 100644
index 095489d2..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package azure
-
-import (
-	"bytes"
-	"io"
-)
-
-type blockBlobWriter interface {
-	GetSize(container, blob string) (int64, error)
-	WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error)
-}
-
-// zeroFillWriter enables writing to an offset outside a block blob's size
-// by offering the chunk to the underlying writer as contiguous data, with
-// the gap in between filled with NUL (zero) bytes.
-type zeroFillWriter struct {
-	blockBlobWriter
-}
-
-func newZeroFillWriter(b blockBlobWriter) zeroFillWriter {
-	w := zeroFillWriter{}
-	w.blockBlobWriter = b
-	return w
-}
-
-// Write writes the given chunk to the specified existing blob even if the
-// offset lies beyond the blob's current size. Any gap is filled with zeros.
-// The returned count of bytes written does not include the zero padding.
-func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) {
-	size, err := z.blockBlobWriter.GetSize(container, blob)
-	if err != nil {
-		return 0, err
-	}
-
-	var reader io.Reader
-	var zeroPadding int64
-	if offset <= size {
-		reader = chunk
-	} else {
-		zeroPadding = offset - size
-		offset = size // adjust offset to be the append index
-		zeros := bytes.NewReader(make([]byte, zeroPadding))
-		reader = io.MultiReader(zeros, chunk)
-	}
-
-	nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader)
-	nn -= zeroPadding
-	return nn, err
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go
deleted file mode 100644
index 49361791..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package azure
-
-import (
-	"bytes"
-	"testing"
-)
-
-func Test_zeroFillWrite_AppendNoGap(t *testing.T) {
-	s := NewStorageSimulator()
-	bw := newRandomBlobWriter(&s, 1024*1)
-	zw := newZeroFillWriter(&bw)
-	if err := s.CreateBlockBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	}
-
-	firstChunk := randomContents(1024*3 + 512)
-	if nn, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil {
-		t.Fatal(err)
-	} else if expected := int64(len(firstChunk)); expected != nn {
-		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
-	}
-	if out, err := s.GetBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	} else {
-		assertBlobContents(t, out, firstChunk)
-	}
-
-	secondChunk := randomContents(256)
-	if nn, err := zw.Write("a", "b", int64(len(firstChunk)), bytes.NewReader(secondChunk)); err != nil {
-		t.Fatal(err)
-	} else if expected := int64(len(secondChunk)); expected != nn {
-		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
-	}
-	if out, err := s.GetBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	} else {
-		assertBlobContents(t, out, append(firstChunk, secondChunk...))
-	}
-
-}
-
-func Test_zeroFillWrite_StartWithGap(t *testing.T) {
-	s := NewStorageSimulator()
-	bw := newRandomBlobWriter(&s, 1024*2)
-	zw := newZeroFillWriter(&bw)
-	if err := s.CreateBlockBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	}
-
-	chunk := randomContents(1024 * 5)
-	padding := int64(1024*2 + 256)
-	if nn, err := zw.Write("a", "b", padding, bytes.NewReader(chunk)); err != nil {
-		t.Fatal(err)
-	} else if expected := int64(len(chunk)); expected != nn {
-		t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
-	}
-	if out, err := s.GetBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	} else {
-		assertBlobContents(t, out, append(make([]byte, padding), chunk...))
-	}
-}
-
-func Test_zeroFillWrite_AppendWithGap(t *testing.T) {
-	s := NewStorageSimulator()
-	bw := newRandomBlobWriter(&s, 1024*2)
-	zw := newZeroFillWriter(&bw)
-	if err := s.CreateBlockBlob("a", "b"); err != nil {
-		t.Fatal(err)
-	}
-
-	firstChunk := randomContents(1024*3 + 512)
-	if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { -
t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - secondChunk := randomContents(256) - padding := int64(1024 * 4) - if nn, err := zw.Write("a", "b", int64(len(firstChunk))+padding, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(firstChunk, append(make([]byte, padding), secondChunk...)...)) - } -} - -func Test_zeroFillWrite_LiesWithinSize(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024 * 3) - if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - // in this case, zerofill won't be used - secondChunk := randomContents(256) - if nn, err := zw.Write("a", "b", 0, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(secondChunk, firstChunk[len(secondChunk):]...)) - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go b/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go deleted file mode 100644 index 60af06b8..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/base/base.go +++ /dev/null @@ -1,169 +0,0 @@ -// Package base provides a base implementation of the storage driver that can -// be used to implement common checks. The goal is to increase the amount of -// code sharing. -// -// The canonical approach to use this class is to embed in the exported driver -// struct such that calls are proxied through this implementation. First, -// declare the internal driver, as follows: -// -// type driver struct { ... internal ...} -// -// The resulting type should implement StorageDriver such that it can be the -// target of a Base struct. The exported type can then be declared as follows: -// -// type Driver struct { -// Base -// } -// -// Because Driver embeds Base, it effectively implements Base. If the driver -// needs to intercept a call, before going to base, Driver should implement -// that method. Effectively, Driver can intercept calls before coming in and -// driver implements the actual logic. -// -// To further shield the embed from other packages, it is recommended to -// employ a private embed struct: -// -// type baseEmbed struct { -// base.Base -// } -// -// Then, declare driver to embed baseEmbed, rather than Base directly: -// -// type Driver struct { -// baseEmbed -// } -// -// The type now implements StorageDriver, proxying through Base, without -// exporting an unnecessary field. -package base - -import ( - "io" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// Base provides a wrapper around a storagedriver implementation that provides -// common path and bounds checking. 
-type Base struct { - storagedriver.StorageDriver -} - -// GetContent wraps GetContent of underlying storage driver. -func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.GetContent(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.GetContent(ctx, path) -} - -// PutContent wraps PutContent of underlying storage driver. -func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.PutContent(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.PutContent(ctx, path, content) -} - -// ReadStream wraps ReadStream of underlying storage driver. -func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.ReadStream(ctx, path, offset) -} - -// WriteStream wraps WriteStream of underlying storage driver. -func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - if !storagedriver.PathRegexp.MatchString(path) { - return 0, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.WriteStream(ctx, path, offset, reader) -} - -// Stat wraps Stat of underlying storage driver. -func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.Stat(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.Stat(ctx, path) -} - -// List wraps List of underlying storage driver. -func (base *Base) List(ctx context.Context, path string) ([]string, error) { - ctx, done := context.WithTrace(ctx) - defer done("%s.List(%q)", base.Name(), path) - - if !storagedriver.PathRegexp.MatchString(path) && path != "/" { - return nil, storagedriver.InvalidPathError{Path: path} - } - - return base.StorageDriver.List(ctx, path) -} - -// Move wraps Move of underlying storage driver. -func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { - ctx, done := context.WithTrace(ctx) - defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) - - if !storagedriver.PathRegexp.MatchString(sourcePath) { - return storagedriver.InvalidPathError{Path: sourcePath} - } else if !storagedriver.PathRegexp.MatchString(destPath) { - return storagedriver.InvalidPathError{Path: destPath} - } - - return base.StorageDriver.Move(ctx, sourcePath, destPath) -} - -// Delete wraps Delete of underlying storage driver. 
-func (base *Base) Delete(ctx context.Context, path string) error {
-	ctx, done := context.WithTrace(ctx)
-	defer done("%s.Delete(%q)", base.Name(), path)
-
-	if !storagedriver.PathRegexp.MatchString(path) {
-		return storagedriver.InvalidPathError{Path: path}
-	}
-
-	return base.StorageDriver.Delete(ctx, path)
-}
-
-// URLFor wraps URLFor of underlying storage driver.
-func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-	ctx, done := context.WithTrace(ctx)
-	defer done("%s.URLFor(%q)", base.Name(), path)
-
-	if !storagedriver.PathRegexp.MatchString(path) {
-		return "", storagedriver.InvalidPathError{Path: path}
-	}
-
-	return base.StorageDriver.URLFor(ctx, path, options)
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go
deleted file mode 100644
index e84f0026..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/factory/factory.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package factory
-
-import (
-	"fmt"
-
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-// driverFactories stores an internal mapping between storage driver names and their respective
-// factories
-var driverFactories = make(map[string]StorageDriverFactory)
-
-// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces
-// Storage drivers should call Register() with a factory to make the driver available by name
-type StorageDriverFactory interface {
-	// Create returns a new storagedriver.StorageDriver with the given parameters
-	// Parameters will vary by driver and may be ignored
-	// Each parameter key must only consist of lowercase letters and numbers
-	Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error)
-}
-
-// Register makes a storage driver available by the provided name.
-// If Register is called twice with the same name or if the driver factory is nil, it panics.
-func Register(name string, factory StorageDriverFactory) {
-	if factory == nil {
-		panic("Must not provide nil StorageDriverFactory")
-	}
-	_, registered := driverFactories[name]
-	if registered {
-		panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name))
-	}
-
-	driverFactories[name] = factory
-}
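For illustration, a minimal sketch of a storage driver plugging into this registry; the "null" name and factory type are hypothetical:

	// Sketch only: a placeholder factory registered under a made-up name.
	type nullDriverFactory struct{}

	func (nullDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
		return nil, fmt.Errorf("null driver cannot be instantiated")
	}

	func init() {
		// After this, Create("null", nil) routes through nullDriverFactory.
		Register("null", nullDriverFactory{})
	}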
-// Create constructs a new storagedriver.StorageDriver with the given name
-// and parameters. To use a driver, its StorageDriverFactory must first be
-// registered under that name. If no factory is registered, an
-// InvalidStorageDriverError is returned.
-func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
-	driverFactory, ok := driverFactories[name]
-	if !ok {
-		return nil, InvalidStorageDriverError{name}
-	}
-	return driverFactory.Create(parameters)
-}
-
-// InvalidStorageDriverError records an attempt to construct an unregistered storage driver
-type InvalidStorageDriverError struct {
-	Name string
-}
-
-func (err InvalidStorageDriverError) Error() string {
-	return fmt.Sprintf("StorageDriver not registered: %s", err.Name)
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go b/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go
deleted file mode 100644
index e5064029..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/fileinfo.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package driver
-
-import "time"
-
-// FileInfo describes information about a given path. Inspired by os.FileInfo,
-// it elides the base name method for a full path instead.
-type FileInfo interface {
-	// Path provides the full path of the target of this file info.
-	Path() string
-
-	// Size returns current length in bytes of the file. The return value can
-	// be used to write to the end of the file at path. The value is
-	// meaningless if IsDir returns true.
-	Size() int64
-
-	// ModTime returns the modification time for the file. For backends that
-	// don't have a modification time, the creation time should be returned.
-	ModTime() time.Time
-
-	// IsDir returns true if the path is a directory.
-	IsDir() bool
-}
-
-// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal
-// should only be used by storagedriver implementations. They should be moved
-// to a "driver" package, similar to database/sql.
-
-// FileInfoFields provides the exported fields for implementing FileInfo
-// interface in storagedriver implementations. It should be used with
-// InternalFileInfo.
-type FileInfoFields struct {
-	// Path provides the full path of the target of this file info.
-	Path string
-
-	// Size is current length in bytes of the file. The value of this field
-	// can be used to write to the end of the file at path. The value is
-	// meaningless if IsDir is set to true.
-	Size int64
-
-	// ModTime returns the modification time for the file. For backends that
-	// don't have a modification time, the creation time should be returned.
-	ModTime time.Time
-
-	// IsDir returns true if the path is a directory.
-	IsDir bool
-}
-
-// FileInfoInternal implements the FileInfo interface. This should only be
-// used by storagedriver implementations that don't have a specialized
-// FileInfo type.
-type FileInfoInternal struct {
-	FileInfoFields
-}
-
-var _ FileInfo = FileInfoInternal{}
-var _ FileInfo = &FileInfoInternal{}
-
-// Path provides the full path of the target of this file info.
-func (fi FileInfoInternal) Path() string {
-	return fi.FileInfoFields.Path
-}
-
-// Size returns current length in bytes of the file. The return value can
-// be used to write to the end of the file at path. The value is
-// meaningless if IsDir returns true.
-func (fi FileInfoInternal) Size() int64 {
-	return fi.FileInfoFields.Size
-}
-
-// ModTime returns the modification time for the file. For backends that
-// don't have a modification time, the creation time should be returned.
-func (fi FileInfoInternal) ModTime() time.Time { - return fi.FileInfoFields.ModTime -} - -// IsDir returns true if the path is a directory. -func (fi FileInfoInternal) IsDir() bool { - return fi.FileInfoFields.IsDir -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go deleted file mode 100644 index d5d8708c..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go +++ /dev/null @@ -1,291 +0,0 @@ -package filesystem - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "filesystem" -const defaultRootDirectory = "/var/lib/registry" - -func init() { - factory.Register(driverName, &filesystemDriverFactory{}) -} - -// filesystemDriverFactory implements the factory.StorageDriverFactory interface -type filesystemDriverFactory struct{} - -func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters), nil -} - -type driver struct { - rootDirectory string -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local -// filesystem. All provided paths will be subpaths of the RootDirectory. -type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Optional Parameters: -// - rootdirectory -func FromParameters(parameters map[string]interface{}) *Driver { - var rootDirectory = defaultRootDirectory - if parameters != nil { - rootDir, ok := parameters["rootdirectory"] - if ok { - rootDirectory = fmt.Sprint(rootDir) - } - } - return New(rootDirectory) -} - -// New constructs a new Driver with a given rootDirectory -func New(rootDirectory string) *Driver { - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: &driver{ - rootDirectory: rootDirectory, - }, - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { - if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { - return err - } - - return os.Truncate(d.fullPath(subPath), int64(len(contents))) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return nil, err - } - - seekPos, err := file.Seek(int64(offset), os.SEEK_SET) - if err != nil { - file.Close() - return nil, err - } else if seekPos < int64(offset) { - file.Close() - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - return file, nil -} - -// WriteStream stores the contents of the provided io.Reader at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { - // TODO(stevvooe): This needs to be a requirement. - // if !path.IsAbs(subPath) { - // return fmt.Errorf("absolute path required: %q", subPath) - // } - - fullPath := d.fullPath(subPath) - parentDir := path.Dir(fullPath) - if err := os.MkdirAll(parentDir, 0755); err != nil { - return 0, err - } - - fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - // TODO(stevvooe): A few missing conditions in storage driver: - // 1. What if the path is already a directory? - // 2. Should number 1 be exposed explicitly in storagedriver? - // 2. Can this path not exist, even if we create above? - return 0, err - } - defer fp.Close() - - nn, err = fp.Seek(offset, os.SEEK_SET) - if err != nil { - return 0, err - } - - if nn != offset { - return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) - } - - return io.Copy(fp, reader) -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { - fullPath := d.fullPath(subPath) - - fi, err := os.Stat(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - - return nil, err - } - - return fileInfo{ - path: subPath, - FileInfo: fi, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { - if subPath[len(subPath)-1] != '/' { - subPath += "/" - } - fullPath := d.fullPath(subPath) - - dir, err := os.Open(fullPath) - if err != nil { - if os.IsNotExist(err) { - return nil, storagedriver.PathNotFoundError{Path: subPath} - } - return nil, err - } - - defer dir.Close() - - fileNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - - keys := make([]string, 0, len(fileNames)) - for _, fileName := range fileNames { - keys = append(keys, path.Join(subPath, fileName)) - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - source := d.fullPath(sourcePath) - dest := d.fullPath(destPath) - - if _, err := os.Stat(source); os.IsNotExist(err) { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - - if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { - return err - } - - err := os.Rename(source, dest) - return err -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (d *driver) Delete(ctx context.Context, subPath string) error { - fullPath := d.fullPath(subPath) - - _, err := os.Stat(fullPath) - if err != nil && !os.IsNotExist(err) { - return err - } else if err != nil { - return storagedriver.PathNotFoundError{Path: subPath} - } - - err = os.RemoveAll(fullPath) - return err -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod -} - -// fullPath returns the absolute path of a key within the Driver's storage. -func (d *driver) fullPath(subPath string) string { - return path.Join(d.rootDirectory, subPath) -} - -type fileInfo struct { - os.FileInfo - path string -} - -var _ storagedriver.FileInfo = fileInfo{} - -// Path provides the full path of the target of this file info. -func (fi fileInfo) Path() string { - return fi.path -} - -// Size returns current length in bytes of the file. The return value can -// be used to write to the end of the file at path. The value is -// meaningless if IsDir returns true. -func (fi fileInfo) Size() int64 { - if fi.IsDir() { - return 0 - } - - return fi.FileInfo.Size() -} - -// ModTime returns the modification time for the file. For backends that -// don't have a modification time, the creation time should be returned. -func (fi fileInfo) ModTime() time.Time { - return fi.FileInfo.ModTime() -} - -// IsDir returns true if the path is a directory. -func (fi fileInfo) IsDir() bool { - return fi.FileInfo.IsDir() -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go deleted file mode 100644 index 8b48b431..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package filesystem - -import ( - "io/ioutil" - "os" - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - . "gopkg.in/check.v1" ) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } - -func init() { - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return New(root), nil - }, testsuites.NeverSkip) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go deleted file mode 100644 index 2d121e1c..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go +++ /dev/null @@ -1,262 +0,0 @@ -package inmemory - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "sync" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "inmemory" - -func init() { - factory.Register(driverName, &inMemoryDriverFactory{}) } - -// inMemoryDriverFactory implements the factory.StorageDriverFactory interface.
-type inMemoryDriverFactory struct{} - -func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return New(), nil -} - -type driver struct { - root *dir - mutex sync.RWMutex -} - -// baseEmbed allows us to hide the Base embed. -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by a local map. -// Intended solely for example and testing purposes. -type Driver struct { - baseEmbed // embedded, hidden base driver. -} - -var _ storagedriver.StorageDriver = &Driver{} - -// New constructs a new Driver. -func New() *Driver { - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: &driver{ - root: &dir{ - common: common{ - p: "/", - mod: time.Now(), - }, - }, - }, - }, - }, - } -} - -// Implement the storagedriver.StorageDriver interface. - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - return ioutil.ReadAll(rc) -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - f, err := d.root.mkfile(p) - if err != nil { - // TODO(stevvooe): Again, we need to clarify when this is not a - // directory in StorageDriver API. - return fmt.Errorf("not a file") - } - - f.truncate() - f.WriteAt(contents, 0) - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - path = normalize(path) - found := d.root.find(path) - - if found.path() != path { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - if found.isdir() { - return nil, fmt.Errorf("%q is a directory", path) - } - - return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil -} - -// WriteStream stores the contents of the provided io.Reader at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { - d.mutex.Lock() - defer d.mutex.Unlock() - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - normalized := normalize(path) - - f, err := d.root.mkfile(normalized) - if err != nil { - return 0, fmt.Errorf("not a file") - } - - // Unlock while we are reading from the source, in case we are reading - // from the same mfs instance. This can be fixed by a more granular - // locking model. - d.mutex.Unlock() - d.mutex.RLock() // Take the readlock to block other writers. - var buf bytes.Buffer - - nn, err = buf.ReadFrom(reader) - if err != nil { - // TODO(stevvooe): This condition is odd and we may need to clarify: - // we've read nn bytes from reader but have written nothing to the - // backend. What is the correct return value? Really, the caller needs - // to know that the reader has been advanced and reattempting the - // operation is incorrect.
- d.mutex.RUnlock() - d.mutex.Lock() - return nn, err - } - - d.mutex.RUnlock() - d.mutex.Lock() - f.WriteAt(buf.Bytes(), offset) - return nn, err -} - -// Stat returns info about the provided path. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - found := d.root.find(normalized) - - if found.path() != normalized { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - fi := storagedriver.FileInfoFields{ - Path: path, - IsDir: found.isdir(), - ModTime: found.modtime(), - } - - if !fi.IsDir { - fi.Size = int64(len(found.(*file).data)) - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - d.mutex.RLock() - defer d.mutex.RUnlock() - - normalized := normalize(path) - - found := d.root.find(normalized) - - if !found.isdir() { - return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... - } - - entries, err := found.(*dir).list(normalized) - - if err != nil { - switch err { - case errNotExists: - return nil, storagedriver.PathNotFoundError{Path: path} - case errIsNotDir: - return nil, fmt.Errorf("not a directory") - default: - return nil, err - } - } - - return entries, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) - - err := d.root.move(normalizedSrc, normalizedDst) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: sourcePath} - default: - return err - } -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - d.mutex.Lock() - defer d.mutex.Unlock() - - normalized := normalize(path) - - err := d.root.delete(normalized) - switch err { - case errNotExists: - return storagedriver.PathNotFoundError{Path: path} - default: - return err - } -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go deleted file mode 100644 index dbc1916f..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/driver_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package inmemory - -import ( - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) } - -func init() { - inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(), nil - } - testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go b/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go deleted file mode 100644 index cdefacfd..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/inmemory/mfs.go +++ /dev/null @@ -1,338 +0,0 @@ -package inmemory - -import ( - "fmt" - "io" - "path" - "sort" - "strings" - "time" -) - -var ( - errExists = fmt.Errorf("exists") - errNotExists = fmt.Errorf("notexists") - errIsNotDir = fmt.Errorf("notdir") - errIsDir = fmt.Errorf("isdir") -) - -type node interface { - name() string - path() string - isdir() bool - modtime() time.Time -} - -// dir is the central type for the memory-based storagedriver. All operations -// are dispatched from a root dir. -type dir struct { - common - - // TODO(stevvooe): Use sorted slice + search. - children map[string]node -} - -var _ node = &dir{} - -func (d *dir) isdir() bool { - return true -} - -// add places the node n into dir d. -func (d *dir) add(n node) { - if d.children == nil { - d.children = make(map[string]node) - } - - d.children[n.name()] = n - d.mod = time.Now() -} - -// find searches for the node, given path q in dir. If the node is found, it -// will be returned and the returned (node).path() will match q. If the node -// is not found, the closest existing parent is returned instead. -func (d *dir) find(q string) node { - q = strings.Trim(q, "/") - i := strings.Index(q, "/") - - if q == "" { - return d - } - - if i == 0 { - panic("shouldn't happen, no root paths") - } - - var component string - if i < 0 { - // No more path components - component = q - } else { - component = q[:i] - } - - child, ok := d.children[component] - if !ok { - // Node was not found. Return the current node. - return d - } - - if child.isdir() { - // traverse down! - q = q[i+1:] - return child.(*dir).find(q) - } - - return child -} - -func (d *dir) list(p string) ([]string, error) { - n := d.find(p) - - if n.path() != p { - return nil, errNotExists - } - - if !n.isdir() { - return nil, errIsNotDir - } - - var children []string - for _, child := range n.(*dir).children { - children = append(children, child.path()) - } - - sort.Strings(children) - return children, nil -} - -// mkfile or return the existing one. returns an error if it exists and is a -// directory. Essentially, this is open or create. -func (d *dir) mkfile(p string) (*file, error) { - n := d.find(p) - if n.path() == p { - if n.isdir() { - return nil, errIsDir - } - - return n.(*file), nil - } - - dirpath, filename := path.Split(p) - // Make any non-existent directories - n, err := d.mkdirs(dirpath) - if err != nil { - return nil, err - } - - dd := n.(*dir) - n = &file{ - common: common{ - p: path.Join(dd.path(), filename), - mod: time.Now(), - }, - } - - dd.add(n) - return n.(*file), nil -} - -// mkdirs creates any missing directory entries in p and returns the result.
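-// For example (illustrative): on an empty tree, mkdirs("/a/b/c") creates "a",
-// then "a/b", then "a/b/c" one component at a time and returns the *dir for
-// "/a/b/c"; any prefix that already exists is reused rather than recreated.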
-func (d *dir) mkdirs(p string) (*dir, error) { - p = normalize(p) - - n := d.find(p) - - if !n.isdir() { - // Found something there - return nil, errIsNotDir - } - - if n.path() == p { - return n.(*dir), nil - } - - dd := n.(*dir) - - relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") - - if relative == "" { - return dd, nil - } - - components := strings.Split(relative, "/") - for _, component := range components { - d, err := dd.mkdir(component) - - if err != nil { - // This should actually never happen, since there are no children. - return nil, err - } - dd = d - } - - return dd, nil -} - -// mkdir creates a child directory under d with the given name. -func (d *dir) mkdir(name string) (*dir, error) { - if name == "" { - return nil, fmt.Errorf("invalid dirname") - } - - _, ok := d.children[name] - if ok { - return nil, errExists - } - - child := &dir{ - common: common{ - p: path.Join(d.path(), name), - mod: time.Now(), - }, - } - d.add(child) - d.mod = time.Now() - - return child, nil -} - -func (d *dir) move(src, dst string) error { - dstDirname, _ := path.Split(dst) - - dp, err := d.mkdirs(dstDirname) - if err != nil { - return err - } - - srcDirname, srcFilename := path.Split(src) - sp := d.find(srcDirname) - - if normalize(srcDirname) != normalize(sp.path()) { - return errNotExists - } - - spd, ok := sp.(*dir) - if !ok { - return errIsNotDir // paranoid. - } - - s, ok := spd.children[srcFilename] - if !ok { - return errNotExists - } - - delete(spd.children, srcFilename) - - switch n := s.(type) { - case *dir: - n.p = dst - case *file: - n.p = dst - } - - dp.add(s) - - return nil -} - -func (d *dir) delete(p string) error { - dirname, filename := path.Split(p) - parent := d.find(dirname) - - if normalize(dirname) != normalize(parent.path()) { - return errNotExists - } - - if _, ok := parent.(*dir).children[filename]; !ok { - return errNotExists - } - - delete(parent.(*dir).children, filename) - return nil -} - -// dump outputs a primitive directory structure to stdout. -func (d *dir) dump(indent string) { - fmt.Println(indent, d.name()+"/") - - for _, child := range d.children { - if child.isdir() { - child.(*dir).dump(indent + "\t") - } else { - fmt.Println(indent, child.name()) - } - - } -} - -func (d *dir) String() string { - return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) -} - -// file stores actual data in the fs tree. It acts like an open, seekable file -// where operations are conducted through ReadAt and WriteAt. Use it with -// SectionReader for the best effect. -type file struct { - common - data []byte -} - -var _ node = &file{} - -func (f *file) isdir() bool { - return false -} - -func (f *file) truncate() { - f.data = f.data[:0] -} - -func (f *file) sectionReader(offset int64) io.Reader { - return io.NewSectionReader(f, offset, int64(len(f.data))-offset) -} - -func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { - return copy(p, f.data[offset:]), nil -} - -func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { - off := int(offset) - if cap(f.data) < off+len(p) { - data := make([]byte, len(f.data), off+len(p)) - copy(data, f.data) - f.data = data - } - - f.mod = time.Now() - f.data = f.data[:off+len(p)] - - return copy(f.data[off:off+len(p)], p), nil -} - -func (f *file) String() string { - return fmt.Sprintf("&file{path: %q}", f.p) -} - -// common provides shared fields and methods for node implementations. 
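-// Both dir and file embed common, so the name/path/modtime accessors that
-// satisfy the node interface live in one place instead of being duplicated
-// per node type.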
-type common struct { - p string - mod time.Time -} - -func (c *common) name() string { - _, name := path.Split(c.p) - return name -} - -func (c *common) path() string { - return c.p -} - -func (c *common) modtime() time.Time { - return c.mod -} - -func normalize(p string) string { - return "/" + strings.Trim(p, "/") -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go deleted file mode 100644 index 31c00afc..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package middleware - cloudfront wrapper for storage libs -// N.B. currently only works with S3, not arbitrary sites -// -package middleware - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "time" - - "github.com/AdRoll/goamz/cloudfront" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" -) - -// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that -// constructs temporary signed CloudFront URLs from the storagedriver layer URL, -// then issues HTTP Temporary Redirects to this CloudFront content URL. -type cloudFrontStorageMiddleware struct { - storagedriver.StorageDriver - cloudfront *cloudfront.CloudFront - duration time.Duration -} - -var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} - -// newCloudFrontStorageMiddleware constructs and returns a new CloudFront -// storage middleware implementation. -// Required options: baseurl, privatekey, keypairid -func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { - base, ok := options["baseurl"] - if !ok { - return nil, fmt.Errorf("No baseurl provided") - } - baseURL, ok := base.(string) - if !ok { - return nil, fmt.Errorf("baseurl must be a string") - } - pk, ok := options["privatekey"] - if !ok { - return nil, fmt.Errorf("No privatekey provided") - } - pkPath, ok := pk.(string) - if !ok { - return nil, fmt.Errorf("privatekey must be a string") - } - kpid, ok := options["keypairid"] - if !ok { - return nil, fmt.Errorf("No keypairid provided") - } - keypairID, ok := kpid.(string) - if !ok { - return nil, fmt.Errorf("keypairid must be a string") - } - - pkBytes, err := ioutil.ReadFile(pkPath) - if err != nil { - return nil, fmt.Errorf("Failed to read privatekey file: %s", err) - } - - block, _ := pem.Decode([]byte(pkBytes)) - if block == nil { - return nil, fmt.Errorf("Failed to decode private key as an rsa private key") - } - privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, err - } - - cf := cloudfront.New(baseURL, privateKey, keypairID) - - duration := 20 * time.Minute - d, ok := options["duration"] - if ok { - switch d := d.(type) { - case time.Duration: - duration = d - case string: - dur, err := time.ParseDuration(d) - if err != nil { - return nil, fmt.Errorf("Invalid duration: %s", err) - } - duration = dur - } - } - - return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil -} - -// S3BucketKeyer is any type that is capable of returning the S3 bucket key -// which should be cached by AWS CloudFront.
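-// Per the package comment above, only the S3 storage driver is expected to
-// satisfy this interface; URLFor below type-asserts against it and falls back
-// to the wrapped driver when the assertion fails.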
-type S3BucketKeyer interface { - S3BucketKey(path string) string -} - -// URLFor returns a signed CloudFront URL for the content at the given path, -// falling back to the wrapped storagedriver if the backend is not supported. -func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - // TODO(endophage): currently only supports S3 - keyer, ok := lh.StorageDriver.(S3BucketKeyer) - if !ok { - context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") - return lh.StorageDriver.URLFor(ctx, path, options) - } - - cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) - if err != nil { - return "", err - } - return cfURL, nil -} - -// init registers the cloudfront storage middleware backend. -func init() { - storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go b/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go deleted file mode 100644 index 7e40a8dd..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/middleware/storagemiddleware.go +++ /dev/null @@ -1,39 +0,0 @@ -package storagemiddleware - -import ( - "fmt" - - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// InitFunc is the type of a StorageMiddleware factory function and is -// used to register the constructor for different StorageMiddleware backends. -type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) - -var storageMiddlewares map[string]InitFunc - -// Register is used to register an InitFunc for -// a StorageMiddleware backend with the given name. -func Register(name string, initFunc InitFunc) error { - if storageMiddlewares == nil { - storageMiddlewares = make(map[string]InitFunc) - } - if _, exists := storageMiddlewares[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - storageMiddlewares[name] = initFunc - - return nil -} - -// Get constructs a StorageMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { - if storageMiddlewares != nil { - if initFunc, exists := storageMiddlewares[name]; exists { - return initFunc(storageDriver, options) - } - } - - return nil, fmt.Errorf("no storage middleware registered with name: %s", name) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go deleted file mode 100644 index d1bc932f..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/oss/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package oss implements the Aliyun OSS Storage driver backend. Support can be -// enabled by including the "include_oss" build tag.
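-// With the tag enabled, the driver registers itself under the name "oss" and
-// can be constructed through the generic storage driver factory. Illustrative
-// sketch only; the region and bucket values are placeholders:
-//
-//	driver, err := factory.Create("oss", map[string]interface{}{
-//		"accesskeyid":     "...",
-//		"accesskeysecret": "...",
-//		"region":          "oss-cn-hangzhou",
-//		"bucket":          "my-bucket",
-//	})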
-package oss diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go deleted file mode 100644 index cec32026..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss.go +++ /dev/null @@ -1,813 +0,0 @@ -// Package oss provides a storagedriver.StorageDriver implementation to -// store blobs in Aliyun OSS cloud storage. -// -// This package leverages the denverdino/aliyungo client library for interfacing with -// oss. -// -// Because OSS is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// +build include_oss - -package oss - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/distribution/context" - - "github.com/Sirupsen/logrus" - "github.com/denverdino/aliyungo/oss" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "oss" - -// minChunkSize defines the minimum multipart upload chunk size -// OSS API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from OSS in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKeyID string - AccessKeySecret string - Bucket string - Region oss.Region - Internal bool - Encrypt bool - Secure bool - ChunkSize int64 - RootDirectory string - Endpoint string -} - -func init() { - factory.Register(driverName, &ossDriverFactory{}) -} - -// ossDriverFactory implements the factory.StorageDriverFactory interface -type ossDriverFactory struct{} - -func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Client *oss.Client - Bucket *oss.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS -// Objects are stored at absolute keys in the provided bucket. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskeyid -// - accesskeysecret -// - region -// - bucket -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - accessKey, ok := parameters["accesskeyid"] - if !ok { - return nil, fmt.Errorf("No accesskeyid parameter provided") - } - secretKey, ok := parameters["accesskeysecret"] - if !ok { - return nil, fmt.Errorf("No accesskeysecret parameter provided") - } - - regionName, ok := parameters["region"] - if !ok || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - internalBool := false - internal, ok := parameters["internal"] - if ok { - internalBool, ok = internal.(bool) - if !ok { - return nil, fmt.Errorf("The internal parameter should be a boolean") - } - } - - encryptBool := false - encrypt, ok := parameters["encrypt"] - if ok { - encryptBool, ok = encrypt.(bool) - if !ok { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - } - - secureBool := true - secure, ok := parameters["secure"] - if ok { - secureBool, ok = secure.(bool) - if !ok { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - default: - return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) - } - } - - rootDirectory, ok := parameters["rootdirectory"] - if !ok { - rootDirectory = "" - } - - endpoint, ok := parameters["endpoint"] - if !ok { - endpoint = "" - } - - params := DriverParameters{ - AccessKeyID: fmt.Sprint(accessKey), - AccessKeySecret: fmt.Sprint(secretKey), - Bucket: fmt.Sprint(bucket), - Region: oss.Region(fmt.Sprint(regionName)), - ChunkSize: chunkSize, - RootDirectory: fmt.Sprint(rootDirectory), - Encrypt: encryptBool, - Secure: secureBool, - Internal: internalBool, - Endpoint: fmt.Sprint(endpoint), - } - - return New(params) -} - -// New constructs a new Driver with the given Aliyun OSS credentials, region, encryption flag, and -// bucket name -func New(params DriverParameters) (*Driver, error) { - - client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) - client.SetEndpoint(params.Endpoint) - bucket := client.Bucket(params.Bucket) - - // Validate that the given credentials have at least read permissions in the -// given bucket scope.
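-// A single-key List against the root prefix is a cheap probe: it exercises
-// both the credentials and bucket access without transferring object data.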
- if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { - return nil, err - } - - // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new OSS client while another one is running on the same bucket. - - d := &driver{ - Client: client, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Bucket.Get(d.ossPath(path)) - if err != nil { - return nil, parseError(path, err) - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") - - resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) - if err != nil { - return nil, parseError(path, err) - } - - // Due to the Aliyun OSS API, status 200 and the whole object will be returned instead of an - // InvalidRange error when the range is invalid. - // - // The OSS server will always return http.StatusPartialContent if the range is acceptable. - if resp.StatusCode != http.StatusPartialContent { - resp.Body.Close() - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return resp.Body, nil -} - -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []oss.Part{} - var part oss.Part - done := make(chan struct{}) // stopgap to free up waiting goroutines - - multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return 0, err - } - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing.
- defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) - if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - - } - return nil - } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are a few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the OSS - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying OSS library should handle it, it doesn't seem to - // be part of the shouldRetry function (see denverdino/aliyungo/oss). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part oss.Part - - loop: - for retries := 0; retries < 5; retries++ { - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if err == nil { - break // success! - } - - // NOTE(stevvooe): This retry code tries to only retry under - // conditions where the OSS package does not. We may add oss - // error codes to the below if we see others bubble up in the - // application. Right now, the most troubling is - // RequestTimeout, which seems to be triggered only when a tcp - // connection to OSS slows to a crawl. If the RequestTimeout - // ends up getting added to the OSS library and we don't see - // other errors, this retry loop can be removed. - switch err := err.(type) { - case *oss.Error: - switch err.Code { - case "RequestTimeout": - // allow retries on only this error. - default: - break loop - } - } - - backoff := 100 * time.Millisecond * time.Duration(retries+1) - logrus.Errorf("error putting part, retrying after %v: %v", backoff.String(), err) - time.Sleep(backoff) - } - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially.
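-// (Serial execution is enforced by the putErrChan handshake in fromReader
-// above: a new part-upload goroutine is launched only after the previous
-// one has reported its result on the channel.)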
- parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.ossPath(path), nil) - if err != nil { - if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - return totalRead, err - } - - if 
totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.ossPath(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". - // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.ossPath("") == "" { - prefix = "/" - } - - listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) - if err != nil { - return nil, err - } - - files := []string{} - directories := []string{} - - for { - for _, key := range listResponse.Contents { - files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) - } - - for _, commonPrefix := range listResponse.CommonPrefixes { - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) - } - - if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } - } else { - break - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath)) - /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(), - oss.CopyOptions{ - //Options: d.getOptions(), - //ContentType: d.getContentType() - }, - d.Bucket.Path(d.ossPath(sourcePath))) - if err != nil { - return parseError(sourcePath, err) - } - - return d.Delete(ctx, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (d *driver) Delete(ctx context.Context, path string) error { - listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax) - if err != nil || len(listResponse.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - - ossObjects := make([]oss.Object, listMax) - - for len(listResponse.Contents) > 0 { - for index, key := range listResponse.Contents { - ossObjects[index].Key = key.Key - } - - err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]}) - if err != nil { - return err - } - - listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - logrus.Infof("expiresTime: %v", expiresTime) - - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - logrus.Infof("expiresTime: %v", expiresTime) - testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) - logrus.Infof("testURL: %s", testURL) - return testURL, nil -} - -func (d *driver) ossPath(path string) string { - return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") -} - -// S3BucketKey returns the OSS bucket key for the given storage driver path. -func (d *Driver) S3BucketKey(path string) string { - return d.StorageDriver.(*driver).ossPath(path) -} - -func parseError(path string, err error) error { - if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} - -func hasCode(err error, code string) bool { - ossErr, ok := err.(*oss.Error) - return ok && ossErr.Code == code -} - -func (d *driver) getOptions() oss.Options { - return oss.Options{ServerSideEncryption: d.Encrypt} -} - -func getPermissions() oss.ACL { - return oss.Private -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) -} - -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go deleted file mode 100644 index fbae5d9c..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/oss/oss_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// +build include_oss - -package oss - -import ( - "io/ioutil" - - alioss "github.com/denverdino/aliyungo/oss" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - //"log" - "os" - "strconv" - "testing" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) } - -var ossDriverConstructor func(rootDirectory string) (*Driver, error) - -var skipCheck func() string - -func init() { - accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") - secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") - bucket := os.Getenv("OSS_BUCKET") - region := os.Getenv("OSS_REGION") - internal := os.Getenv("OSS_INTERNAL") - encrypt := os.Getenv("OSS_ENCRYPT") - secure := os.Getenv("OSS_SECURE") - endpoint := os.Getenv("OSS_ENDPOINT") - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - ossDriverConstructor = func(rootDirectory string) (*Driver, error) { - encryptBool := false - if encrypt != "" { - encryptBool, err = strconv.ParseBool(encrypt) - if err != nil { - return nil, err - } - } - - secureBool := false - if secure != "" { - secureBool, err = strconv.ParseBool(secure) - if err != nil { - return nil, err - } - } - - internalBool := false - if internal != "" { - internalBool, err = strconv.ParseBool(internal) - if err != nil { - return nil, err - } - } - - parameters := DriverParameters{ - AccessKeyID: accessKey, - AccessKeySecret: secretKey, - Bucket: bucket, - Region: alioss.Region(region), - Internal: internalBool, - ChunkSize: minChunkSize, - RootDirectory: rootDirectory, - Encrypt: encryptBool, - Secure: secureBool, - Endpoint: endpoint, - } - - return New(parameters) - } - - // Skip OSS storage driver tests if environment variable parameters are not provided - skipCheck = func() string { - if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" - } - return "" - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return ossDriverConstructor(root) - }, skipCheck) -} - -func TestEmptyRootList(t *testing.T) { - if skipCheck() != "" { - t.Skip(skipCheck()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := ossDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := ossDriverConstructor("") - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := ossDriverConstructor("/") - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/rados/doc.go b/vendor/github.com/docker/distribution/registry/storage/driver/rados/doc.go deleted file mode 100644 index 655c68a3..00000000 --- 
a/vendor/github.com/docker/distribution/registry/storage/driver/rados/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rados implements the rados storage driver backend. Support can be -// enabled by including the "include_rados" build tag. -package rados diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/rados/rados.go b/vendor/github.com/docker/distribution/registry/storage/driver/rados/rados.go deleted file mode 100644 index 0ea10a89..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/rados/rados.go +++ /dev/null @@ -1,630 +0,0 @@ -// +build include_rados - -package rados - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "path" - "strconv" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/uuid" - "github.com/noahdesu/go-ceph/rados" -) - -const driverName = "rados" - -// Prefix all the stored blob -const objectBlobPrefix = "blob:" - -// Stripes objects size to 4M -const defaultChunkSize = 4 << 20 -const defaultXattrTotalSizeName = "total-size" - -// Max number of keys fetched from omap at each read operation -const defaultKeysFetched = 1 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - poolname string - username string - chunksize uint64 -} - -func init() { - factory.Register(driverName, &radosDriverFactory{}) -} - -// radosDriverFactory implements the factory.StorageDriverFactory interface -type radosDriverFactory struct{} - -func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Conn *rados.Conn - Ioctx *rados.IOContext - chunksize uint64 -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS -// Objects are stored at absolute keys in the provided bucket. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - poolname: the ceph pool name -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - - pool, ok := parameters["poolname"] - if !ok { - return nil, fmt.Errorf("No poolname parameter provided") - } - - username, ok := parameters["username"] - if !ok { - username = "" - } - - chunksize := uint64(defaultChunkSize) - chunksizeParam, ok := parameters["chunksize"] - if ok { - chunksize, ok = chunksizeParam.(uint64) - if !ok { - return nil, fmt.Errorf("The chunksize parameter should be a number") - } - } - - params := DriverParameters{ - fmt.Sprint(pool), - fmt.Sprint(username), - chunksize, - } - - return New(params) -} - -// New constructs a new Driver -func New(params DriverParameters) (*Driver, error) { - var conn *rados.Conn - var err error - - if params.username != "" { - log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) - conn, err = rados.NewConnWithUser(params.username) - } else { - log.Infof("Opening connection to pool %s", params.poolname) - conn, err = rados.NewConn() - } - - if err != nil { - return nil, err - } - - err = conn.ReadDefaultConfigFile() - if err != nil { - return nil, err - } - - err = conn.Connect() - if err != nil { - return nil, err - } - - log.Infof("Connected") - - ioctx, err := conn.OpenIOContext(params.poolname) - - log.Infof("Connected to pool %s", params.poolname) - - if err != nil { - return nil, err - } - - d := &driver{ - Ioctx: ioctx, - Conn: conn, - chunksize: params.chunksize, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { - return err - } - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
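-// The readStreamReader below provides that reader: it walks the fixed-size
-// chunk objects that make up a blob (via getChunkNameFromOffset) and presents
-// them as a single contiguous stream.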
-type readStreamReader struct {
-	driver *driver
-	oid    string
-	size   uint64
-	offset uint64
-}
-
-func (r *readStreamReader) Read(b []byte) (n int, err error) {
-	// Determine the part available to read
-	bufferOffset := uint64(0)
-	bufferSize := uint64(len(b))
-
-	// End of the object, read less than the buffer size
-	if bufferSize > r.size-r.offset {
-		bufferSize = r.size - r.offset
-	}
-
-	// Fill `b`
-	for bufferOffset < bufferSize {
-		// Get the offset in the object chunk
-		chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset)
-
-		// Determine the best size to read
-		bufferEndOffset := bufferSize
-		if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset {
-			bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset)
-		}
-
-		// Read the chunk
-		n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset)
-
-		if err != nil {
-			return int(bufferOffset), err
-		}
-
-		bufferOffset += uint64(n)
-		r.offset += uint64(n)
-	}
-
-	// EOF if the offset is at the end of the object
-	if r.offset == r.size {
-		return int(bufferOffset), io.EOF
-	}
-
-	return int(bufferOffset), nil
-}
-
-func (r *readStreamReader) Close() error {
-	return nil
-}
-
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
-	// get oid from filename
-	oid, err := d.getOid(path)
-
-	if err != nil {
-		return nil, err
-	}
-
-	// get object stat
-	stat, err := d.Stat(ctx, path)
-
-	if err != nil {
-		return nil, err
-	}
-
-	if offset > stat.Size() {
-		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
-	}
-
-	return &readStreamReader{
-		driver: d,
-		oid:    oid,
-		size:   uint64(stat.Size()),
-		offset: uint64(offset),
-	}, nil
-}
-
-func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
-	buf := make([]byte, d.chunksize)
-	totalRead = 0
-
-	oid, err := d.getOid(path)
-	if err != nil {
-		switch err.(type) {
-		// Trying to write new object, generate new blob identifier for it
-		case storagedriver.PathNotFoundError:
-			oid = d.generateOid()
-			err = d.putOid(path, oid)
-			if err != nil {
-				return 0, err
-			}
-		default:
-			return 0, err
-		}
-	} else {
-		// Check total object size only for existing ones
-		totalSize, err := d.getXattrTotalSize(ctx, oid)
-		if err != nil {
-			return 0, err
-		}
-
-		// If offset is after the current object size, fill the gap with zeros
-		for totalSize < uint64(offset) {
-			sizeToWrite := d.chunksize
-			if uint64(offset)-totalSize < sizeToWrite {
-				sizeToWrite = uint64(offset) - totalSize
-			}
-
-			chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize))
-			err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset))
-			if err != nil {
-				return totalRead, err
-			}
-
-			totalSize += sizeToWrite
-		}
-	}
-
-	// Writer
-	for {
-		// Align to chunk size
-		sizeRead := uint64(0)
-		sizeToRead := uint64(offset+totalRead) % d.chunksize
-		if sizeToRead == 0 {
-			sizeToRead = d.chunksize
-		}
-
-		// Read from `reader`
-		for sizeRead < sizeToRead {
-			nn, err := reader.Read(buf[sizeRead:sizeToRead])
-			sizeRead += uint64(nn)
-
-			if err != nil {
-				if err != io.EOF {
-					return totalRead, err
-				}
-
-				break
-			}
-		}
-
-		// End of file and nothing was read
-		if sizeRead == 0 {
-			break
-		}
-
-		// Write chunk object
-		chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead))
-		err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset))
-
-		if err != nil {
-			return totalRead, err
-		}
- - // Update total object size as xattr in the first chunk of the object - err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) - if err != nil { - return totalRead, err - } - - totalRead += int64(sizeRead) - - // End of file - if sizeRead < sizeToRead { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - // get oid from filename - oid, err := d.getOid(path) - - if err != nil { - return nil, err - } - - // the path is a virtual directory? - if oid == "" { - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: 0, - IsDir: true, - }, - }, nil - } - - // stat first chunk - stat, err := d.Ioctx.Stat(oid + "-0") - - if err != nil { - return nil, err - } - - // get total size of chunked object - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return nil, err - } - - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: int64(totalSize), - ModTime: stat.ModTime, - }, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { - files, err := d.listDirectoryOid(dirPath) - - if err != nil { - return nil, err - } - - keys := make([]string, 0, len(files)) - for k := range files { - keys = append(keys, path.Join(dirPath, k)) - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - // Get oid - oid, err := d.getOid(sourcePath) - - if err != nil { - return err - } - - // Move reference - err = d.putOid(destPath, oid) - - if err != nil { - return err - } - - // Delete old reference - err = d.deleteOid(sourcePath) - - if err != nil { - return err - } - - return nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, objectPath string) error { - // Get oid - oid, err := d.getOid(objectPath) - - if err != nil { - return err - } - - // Deleting virtual directory - if oid == "" { - objects, err := d.listDirectoryOid(objectPath) - if err != nil { - return err - } - - for object := range objects { - err = d.Delete(ctx, path.Join(objectPath, object)) - if err != nil { - return err - } - } - } else { - // Delete object chunks - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return err - } - - for offset := uint64(0); offset < totalSize; offset += d.chunksize { - chunkName, _ := d.getChunkNameFromOffset(oid, offset) - - err = d.Ioctx.Delete(chunkName) - if err != nil { - return err - } - } - - // Delete reference - err = d.deleteOid(objectPath) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
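The putOid/getOid/deleteOid helpers that follow implement the driver's "virtual directory" hierarchy on top of RADOS omaps. A toy, in-memory sketch of the same referencing scheme, with Go maps standing in for Ioctx.SetOmap/GetOmapValues (the real helper also short-circuits once a parent reference already exists, which this sketch omits):

```go
package main

import (
	"fmt"
	"path"
)

// dirs maps a directory object to its omap: child name -> blob id
// (an empty value marks a subdirectory, as in putOid below).
var dirs = map[string]map[string]string{}

func putOid(objectPath, oid string) {
	dir, base := path.Dir(objectPath), path.Base(objectPath)
	if dirs[dir] == nil {
		dirs[dir] = map[string]string{}
	}
	dirs[dir][base] = oid
	// reference the parent chain so every level of the hierarchy exists
	if dir != "/" {
		putOid(dir, "")
	}
}

func main() {
	putOid("/docker/registry/v2/blobs/x", "blob:1234")
	fmt.Println(dirs["/docker/registry/v2/blobs"]["x"]) // blob:1234
	fmt.Println(dirs["/docker/registry"])               // map[v2:]
}
```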
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-	return "", storagedriver.ErrUnsupportedMethod
-}
-
-// Generate a blob identifier
-func (d *driver) generateOid() string {
-	return objectBlobPrefix + uuid.Generate().String()
-}
-
-// Reference an object and its hierarchy
-func (d *driver) putOid(objectPath string, oid string) error {
-	directory := path.Dir(objectPath)
-	base := path.Base(objectPath)
-	createParentReference := true
-
-	// After creating this reference, skip the parents referencing since the
-	// hierarchy already exists
-	if oid == "" {
-		firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
-		if (err == nil) && (len(firstReference) > 0) {
-			createParentReference = false
-		}
-	}
-
-	oids := map[string][]byte{
-		base: []byte(oid),
-	}
-
-	// Reference object
-	err := d.Ioctx.SetOmap(directory, oids)
-	if err != nil {
-		return err
-	}
-
-	// Ensure parent virtual directories
-	if createParentReference && directory != "/" {
-		return d.putOid(directory, "")
-	}
-
-	return nil
-}
-
-// Get the object identifier from an object name
-func (d *driver) getOid(objectPath string) (string, error) {
-	directory := path.Dir(objectPath)
-	base := path.Base(objectPath)
-
-	files, err := d.Ioctx.GetOmapValues(directory, "", base, 1)
-
-	if (err != nil) || (files[base] == nil) {
-		return "", storagedriver.PathNotFoundError{Path: objectPath}
-	}
-
-	return string(files[base]), nil
-}
-
-// List the objects of a virtual directory
-func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) {
-	return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched)
-}
-
-// Remove a file from the files hierarchy
-func (d *driver) deleteOid(objectPath string) error {
-	// Remove object reference
-	directory := path.Dir(objectPath)
-	base := path.Base(objectPath)
-	err := d.Ioctx.RmOmapKeys(directory, []string{base})
-
-	if err != nil {
-		return err
-	}
-
-	// Remove virtual directory if empty (no more references)
-	firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1)
-
-	if err != nil {
-		return err
-	}
-
-	if len(firstReference) == 0 {
-		// Delete omap
-		err := d.Ioctx.Delete(directory)
-
-		if err != nil {
-			return err
-		}
-
-		// Remove reference on parent omaps
-		if directory != "/" {
-			return d.deleteOid(directory)
-		}
-	}
-
-	return nil
-}
-
-// Takes an offset in a chunked object and returns the chunk name and a new
-// offset in this chunk object
-func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) {
-	chunkID := offset / d.chunksize
-	chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10)
-	chunkedOffset := offset % d.chunksize
-	return chunkedOid, chunkedOffset
-}
-
-// Set the total size of a chunked object `oid`
-func (d *driver) setXattrTotalSize(oid string, size uint64) error {
-	// Convert uint64 `size` to []byte
-	xattr := make([]byte, binary.MaxVarintLen64)
-	binary.LittleEndian.PutUint64(xattr, size)
-
-	// Save the total size as a xattr in the first chunk
-	return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
-}
-
-// Get the total size of the chunked object `oid` stored as xattr
-func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) {
-	// Fetch xattr as []byte
-	xattr := make([]byte, binary.MaxVarintLen64)
-	xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr)
-
-	if err != nil {
-		return 0, err
-	}
-
-	if xattrLength != len(xattr) {
-		context.GetLogger(ctx).Errorf("object %s xattr length mismatch: %d != %d", oid, xattrLength, len(xattr))
-		return 0, storagedriver.PathNotFoundError{Path: oid}
-	}
-
-	// Convert []byte as uint64
-	totalSize := binary.LittleEndian.Uint64(xattr)
-
-	return totalSize, nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go
deleted file mode 100644
index ce367fb5..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build include_rados
-
-package rados
-
-import (
-	"os"
-	"testing"
-
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/distribution/registry/storage/driver/testsuites"
-
-	"gopkg.in/check.v1"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-func init() {
-	poolname := os.Getenv("RADOS_POOL")
-	username := os.Getenv("RADOS_USER")
-
-	driverConstructor := func() (storagedriver.StorageDriver, error) {
-		parameters := DriverParameters{
-			poolname,
-			username,
-			defaultChunkSize,
-		}
-
-		return New(parameters)
-	}
-
-	skipCheck := func() string {
-		if poolname == "" {
-			return "RADOS_POOL must be set to run Rados tests"
-		}
-		return ""
-	}
-
-	testsuites.RegisterSuite(driverConstructor, skipCheck)
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3/s3.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3/s3.go
deleted file mode 100644
index 552c221d..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/s3/s3.go
+++ /dev/null
@@ -1,826 +0,0 @@
-// Package s3 provides a storagedriver.StorageDriver implementation to
-// store blobs in Amazon S3 cloud storage.
-//
-// This package leverages the AdRoll/goamz client library for interfacing with
-// s3.
-//
-// Because s3 is a key, value store the Stat call does not support last modification
-// time for directories (directories are an abstraction for key, value stores)
-//
-// Keep in mind that s3 guarantees only eventual consistency, so do not assume
-// that a successful write will mean immediate access to the data written (although
-// in most regions a new object put has guaranteed read after write). The only true
-// guarantee is that once you call Stat and receive a certain file size, that much of
-// the file is already accessible.
-package s3 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" - "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "s3" - -// minChunkSize defines the minimum multipart upload chunk size -// S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from S3 in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKey string - SecretKey string - Bucket string - Region aws.Region - Encrypt bool - Secure bool - V4Auth bool - ChunkSize int64 - RootDirectory string -} - -func init() { - factory.Register(driverName, &s3DriverFactory{}) -} - -// s3DriverFactory implements the factory.StorageDriverFactory interface -type s3DriverFactory struct{} - -func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - S3 *s3.S3 - Bucket *s3.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 -// Objects are stored at absolute keys in the provided bucket. 
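The FromParameters constructor below validates a loosely typed parameter map. A sketch of the map shape it accepts; the credentials and bucket name are placeholders, not real values:

```go
package main

import "fmt"

func main() {
	// Parameter map shape accepted by the s3 FromParameters below; the
	// credentials and bucket name are hypothetical.
	parameters := map[string]interface{}{
		"accesskey":     "AKIAEXAMPLE",   // optional when using EC2 IAM roles
		"secretkey":     "secret",        // optional when using EC2 IAM roles
		"region":        "us-east-1",     // required
		"bucket":        "my-registry",   // required
		"encrypt":       true,            // optional bool
		"secure":        true,            // optional bool, defaults to true
		"v4auth":        false,           // must be true for eu-central-1
		"chunksize":     int64(10 << 20), // optional, must be >= 5 MiB
		"rootdirectory": "/registry",     // optional key prefix
	}
	fmt.Println(parameters)
	// driver, err := s3.FromParameters(parameters)
}
```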
-type Driver struct {
-	baseEmbed
-}
-
-// FromParameters constructs a new Driver with a given parameters map
-// Required parameters:
-// - accesskey
-// - secretkey
-// - region
-// - bucket
-// - encrypt
-func FromParameters(parameters map[string]interface{}) (*Driver, error) {
-	// Providing no values for these is valid in case the user is authenticating
-	// with an IAM on an ec2 instance (in which case the instance credentials will
-	// be summoned when GetAuth is called)
-	accessKey, ok := parameters["accesskey"]
-	if !ok {
-		accessKey = ""
-	}
-	secretKey, ok := parameters["secretkey"]
-	if !ok {
-		secretKey = ""
-	}
-
-	regionName, ok := parameters["region"]
-	if !ok || fmt.Sprint(regionName) == "" {
-		return nil, fmt.Errorf("No region parameter provided")
-	}
-	region := aws.GetRegion(fmt.Sprint(regionName))
-	if region.Name == "" {
-		return nil, fmt.Errorf("Invalid region provided: %v", region)
-	}
-
-	bucket, ok := parameters["bucket"]
-	if !ok || fmt.Sprint(bucket) == "" {
-		return nil, fmt.Errorf("No bucket parameter provided")
-	}
-
-	encryptBool := false
-	encrypt, ok := parameters["encrypt"]
-	if ok {
-		encryptBool, ok = encrypt.(bool)
-		if !ok {
-			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
-		}
-	}
-
-	secureBool := true
-	secure, ok := parameters["secure"]
-	if ok {
-		secureBool, ok = secure.(bool)
-		if !ok {
-			return nil, fmt.Errorf("The secure parameter should be a boolean")
-		}
-	}
-
-	v4AuthBool := false
-	v4Auth, ok := parameters["v4auth"]
-	if ok {
-		v4AuthBool, ok = v4Auth.(bool)
-		if !ok {
-			return nil, fmt.Errorf("The v4auth parameter should be a boolean")
-		}
-	}
-
-	chunkSize := int64(defaultChunkSize)
-	chunkSizeParam, ok := parameters["chunksize"]
-	if ok {
-		switch v := chunkSizeParam.(type) {
-		case string:
-			vv, err := strconv.ParseInt(v, 0, 64)
-			if err != nil {
-				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
-			}
-			chunkSize = vv
-		case int64:
-			chunkSize = v
-		case int, uint, int32, uint32, uint64:
-			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
-		default:
-			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
-		}
-
-		if chunkSize < minChunkSize {
-			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
-		}
-	}
-
-	rootDirectory, ok := parameters["rootdirectory"]
-	if !ok {
-		rootDirectory = ""
-	}
-
-	params := DriverParameters{
-		fmt.Sprint(accessKey),
-		fmt.Sprint(secretKey),
-		fmt.Sprint(bucket),
-		region,
-		encryptBool,
-		secureBool,
-		v4AuthBool,
-		chunkSize,
-		fmt.Sprint(rootDirectory),
-	}
-
-	return New(params)
-}
-
-// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
-// bucketName
-func New(params DriverParameters) (*Driver, error) {
-	auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
-	if err != nil {
-		return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err)
-	}
-
-	if !params.Secure {
-		params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1)
-	}
-
-	s3obj := s3.New(auth, params.Region)
-	bucket := s3obj.Bucket(params.Bucket)
-
-	if params.V4Auth {
-		s3obj.Signature = aws.V4Signature
-	} else {
-		if params.Region.Name == "eu-central-1" {
-			return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication")
-		}
-	}
-
-	// Validate that the given credentials have at least read permissions in the
-	// given bucket scope.
-	if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
-		return nil, err
-	}
-
-	// TODO Currently multipart uploads have no timestamps, so this would be unwise
-	// if you initiated a new s3driver while another one is running on the same bucket.
-	// multis, _, err := bucket.ListMulti("", "")
-	// if err != nil {
-	// 	return nil, err
-	// }
-
-	// for _, multi := range multis {
-	// 	err := multi.Abort()
-	// 	//TODO appropriate to do this error checking?
-	// 	if err != nil {
-	// 		return nil, err
-	// 	}
-	// }
-
-	d := &driver{
-		S3:            s3obj,
-		Bucket:        bucket,
-		ChunkSize:     params.ChunkSize,
-		Encrypt:       params.Encrypt,
-		RootDirectory: params.RootDirectory,
-		zeros:         make([]byte, params.ChunkSize),
-	}
-
-	d.pool.New = func() interface{} {
-		return make([]byte, d.ChunkSize)
-	}
-
-	return &Driver{
-		baseEmbed: baseEmbed{
-			Base: base.Base{
-				StorageDriver: d,
-			},
-		},
-	}, nil
-}
-
-// Implement the storagedriver.StorageDriver interface
-
-func (d *driver) Name() string {
-	return driverName
-}
-
-// GetContent retrieves the content stored at "path" as a []byte.
-func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
-	content, err := d.Bucket.Get(d.s3Path(path))
-	if err != nil {
-		return nil, parseError(path, err)
-	}
-	return content, nil
-}
-
-// PutContent stores the []byte content at a location designated by "path".
-func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
-	return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
-}
-
-// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
-// given byte offset.
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
-	headers := make(http.Header)
-	headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")
-
-	resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers)
-	if err != nil {
-		if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" {
-			return ioutil.NopCloser(bytes.NewReader(nil)), nil
-		}
-
-		return nil, parseError(path, err)
-	}
-	return resp.Body, nil
-}
-
-// WriteStream stores the contents of the provided io.Reader at a
-// location designated by the given path. The driver will know it has
-// received the full contents when the reader returns io.EOF. The number
-// of successfully READ bytes will be returned, even if an error is
-// returned. May be used to resume writing a stream by providing a nonzero
-// offset. Offsets past the current size will write from the position
-// beyond the end of the file.
-func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
-	partNumber := 1
-	bytesRead := 0
-	var putErrChan chan error
-	parts := []s3.Part{}
-	var part s3.Part
-	done := make(chan struct{}) // stopgap to free up waiting goroutines
-
-	multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions())
-	if err != nil {
-		return 0, err
-	}
-
-	buf := d.getbuf()
-
-	// We never want to leave a dangling multipart upload, our only consistent state is
-	// when there is a whole object at path. This is in order to remain consistent with
-	// the stat call.
- // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. - defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) - if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - - } - return nil - } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the s3 - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying s3 library should handle it, it doesn't seem to - // be part of the shouldRetry function (see AdRoll/goamz/s3). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part s3.Part - - loop: - for retries := 0; retries < 5; retries++ { - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if err == nil { - break // success! - } - - // NOTE(stevvooe): This retry code tries to only retry under - // conditions where the s3 package does not. We may add s3 - // error codes to the below if we see others bubble up in the - // application. Right now, the most troubling is - // RequestTimeout, which seems to only triggered when a tcp - // connection to s3 slows to a crawl. If the RequestTimeout - // ends up getting added to the s3 library and we don't see - // other errors, this retry loop can be removed. - switch err := err.(type) { - case *s3.Error: - switch err.Code { - case "RequestTimeout": - // allow retries on only this error. 
- default: - break loop - } - } - - backoff := 100 * time.Millisecond * time.Duration(retries+1) - logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) - time.Sleep(backoff) - } - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially. - parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.s3Path(path), nil) - if err != nil { - if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > 
currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.s3Path(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, path string) ([]string, error) { - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". - // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.s3Path("") == "" { - prefix = "/" - } - - listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) - if err != nil { - return nil, err - } - - files := []string{} - directories := []string{} - - for { - for _, key := range listResponse.Contents { - files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) - } - - for _, commonPrefix := range listResponse.CommonPrefixes { - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) - } - - if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } - } else { - break - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - /* This is terrible, but aws doesn't have an actual move. 
*/
-	_, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(),
-		s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath))
-	if err != nil {
-		return parseError(sourcePath, err)
-	}
-
-	return d.Delete(ctx, sourcePath)
-}
-
-// Delete recursively deletes all objects stored at "path" and its subpaths.
-func (d *driver) Delete(ctx context.Context, path string) error {
-	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
-	if err != nil || len(listResponse.Contents) == 0 {
-		return storagedriver.PathNotFoundError{Path: path}
-	}
-
-	s3Objects := make([]s3.Object, listMax)
-
-	for len(listResponse.Contents) > 0 {
-		for index, key := range listResponse.Contents {
-			s3Objects[index].Key = key.Key
-		}
-
-		err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
-		if err != nil {
-			return err
-		}
-
-		listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// URLFor returns a URL which may be used to retrieve the content stored at the given path.
-// May return an UnsupportedMethodErr in certain StorageDriver implementations.
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-	methodString := "GET"
-	method, ok := options["method"]
-	if ok {
-		methodString, ok = method.(string)
-		if !ok || (methodString != "GET" && methodString != "HEAD") {
-			return "", storagedriver.ErrUnsupportedMethod
-		}
-	}
-
-	expiresTime := time.Now().Add(20 * time.Minute)
-	expires, ok := options["expiry"]
-	if ok {
-		et, ok := expires.(time.Time)
-		if ok {
-			expiresTime = et
-		}
-	}
-
-	return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
-}
-
-func (d *driver) s3Path(path string) string {
-	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
-}
-
-// S3BucketKey returns the s3 bucket key for the given storage driver path.
-func (d *Driver) S3BucketKey(path string) string {
-	return d.StorageDriver.(*driver).s3Path(path)
-}
-
-func parseError(path string, err error) error {
-	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
-		return storagedriver.PathNotFoundError{Path: path}
-	}
-
-	return err
-}
-
-func hasCode(err error, code string) bool {
-	s3err, ok := err.(*aws.Error)
-	return ok && s3err.Code == code
-}
-
-func (d *driver) getOptions() s3.Options {
-	return s3.Options{SSE: d.Encrypt}
-}
-
-func getPermissions() s3.ACL {
-	return s3.Private
-}
-
-func (d *driver) getContentType() string {
-	return "application/octet-stream"
-}
-
-// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
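getbuf and putbuf below recycle chunk-sized buffers through a sync.Pool, zeroing each buffer before reuse so stale upload data never leaks into a later part. A self-contained sketch of the same pattern (sizes and payload illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

const chunkSize = 5 << 20 // one buffer per in-flight part, as in the driver

func main() {
	zeros := make([]byte, chunkSize)
	pool := sync.Pool{New: func() interface{} { return make([]byte, chunkSize) }}

	buf := pool.Get().([]byte) // getbuf: a chunk-sized scratch buffer
	copy(buf, []byte("part payload"))

	copy(buf, zeros) // putbuf zeroes the buffer before returning it
	pool.Put(buf)
	fmt.Println(len(buf)) // 5242880
}
```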
-func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) -} - -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go deleted file mode 100644 index 70172a6d..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/AdRoll/goamz/aws" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -var s3DriverConstructor func(rootDirectory string) (*Driver, error) -var skipS3 func() string - -func init() { - accessKey := os.Getenv("AWS_ACCESS_KEY") - secretKey := os.Getenv("AWS_SECRET_KEY") - bucket := os.Getenv("S3_BUCKET") - encrypt := os.Getenv("S3_ENCRYPT") - secure := os.Getenv("S3_SECURE") - v4auth := os.Getenv("S3_USE_V4_AUTH") - region := os.Getenv("AWS_REGION") - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - s3DriverConstructor = func(rootDirectory string) (*Driver, error) { - encryptBool := false - if encrypt != "" { - encryptBool, err = strconv.ParseBool(encrypt) - if err != nil { - return nil, err - } - } - - secureBool := true - if secure != "" { - secureBool, err = strconv.ParseBool(secure) - if err != nil { - return nil, err - } - } - - v4AuthBool := false - if v4auth != "" { - v4AuthBool, err = strconv.ParseBool(v4auth) - if err != nil { - return nil, err - } - } - - parameters := DriverParameters{ - accessKey, - secretKey, - bucket, - aws.GetRegion(region), - encryptBool, - secureBool, - v4AuthBool, - minChunkSize, - rootDirectory, - } - - return New(parameters) - } - - // Skip S3 storage driver tests if environment variable parameters are not provided - skipS3 = func() string { - if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" - } - return "" - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return s3DriverConstructor(root) - }, skipS3) -} - -func TestEmptyRootList(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := s3DriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := s3DriverConstructor("") - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := s3DriverConstructor("/") - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if 
!storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go deleted file mode 100644 index bade099f..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/storagedriver.go +++ /dev/null @@ -1,125 +0,0 @@ -package driver - -import ( - "errors" - "fmt" - "io" - "regexp" - "strconv" - "strings" - - "github.com/docker/distribution/context" -) - -// Version is a string representing the storage driver version, of the form -// Major.Minor. -// The registry must accept storage drivers with equal major version and greater -// minor version, but may not be compatible with older storage driver versions. -type Version string - -// Major returns the major (primary) component of a version. -func (version Version) Major() uint { - majorPart := strings.Split(string(version), ".")[0] - major, _ := strconv.ParseUint(majorPart, 10, 0) - return uint(major) -} - -// Minor returns the minor (secondary) component of a version. -func (version Version) Minor() uint { - minorPart := strings.Split(string(version), ".")[1] - minor, _ := strconv.ParseUint(minorPart, 10, 0) - return uint(minor) -} - -// CurrentVersion is the current storage driver Version. -const CurrentVersion Version = "0.1" - -// StorageDriver defines methods that a Storage Driver must implement for a -// filesystem-like key/value object storage. -type StorageDriver interface { - // Name returns the human-readable "name" of the driver, useful in error - // messages and logging. By convention, this will just be the registration - // name, but drivers may provide other information here. - Name() string - - // GetContent retrieves the content stored at "path" as a []byte. - // This should primarily be used for small objects. - GetContent(ctx context.Context, path string) ([]byte, error) - - // PutContent stores the []byte content at a location designated by "path". - // This should primarily be used for small objects. - PutContent(ctx context.Context, path string, content []byte) error - - // ReadStream retrieves an io.ReadCloser for the content stored at "path" - // with a given byte offset. - // May be used to resume reading a stream by providing a nonzero offset. - ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) - - // WriteStream stores the contents of the provided io.ReadCloser at a - // location designated by the given path. - // May be used to resume writing a stream by providing a nonzero offset. - // The offset must be no larger than the CurrentSize for this path. - WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) - - // Stat retrieves the FileInfo for the given path, including the current - // size in bytes and the creation time. - Stat(ctx context.Context, path string) (FileInfo, error) - - // List returns a list of the objects that are direct descendants of the - //given path. - List(ctx context.Context, path string) ([]string, error) - - // Move moves an object stored at sourcePath to destPath, removing the - // original object. 
-	// Note: This may be no more efficient than a copy followed by a delete for
-	// many implementations.
-	Move(ctx context.Context, sourcePath string, destPath string) error
-
-	// Delete recursively deletes all objects stored at "path" and its subpaths.
-	Delete(ctx context.Context, path string) error
-
-	// URLFor returns a URL which may be used to retrieve the content stored at
-	// the given path, possibly using the given options.
-	// May return an ErrUnsupportedMethod in certain StorageDriver
-	// implementations.
-	URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error)
-}
-
-// PathRegexp is the regular expression which each file path must match. A
-// file path is absolute, beginning with a slash and containing a positive
-// number of path components separated by slashes, where each component is
-// restricted to lowercase alphanumeric characters or a period, underscore, or
-// hyphen.
-var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)
-
-// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method.
-var ErrUnsupportedMethod = errors.New("unsupported method")
-
-// PathNotFoundError is returned when operating on a nonexistent path.
-type PathNotFoundError struct {
-	Path string
-}
-
-func (err PathNotFoundError) Error() string {
-	return fmt.Sprintf("Path not found: %s", err.Path)
-}
-
-// InvalidPathError is returned when the provided path is malformed.
-type InvalidPathError struct {
-	Path string
-}
-
-func (err InvalidPathError) Error() string {
-	return fmt.Sprintf("Invalid path: %s", err.Path)
-}
-
-// InvalidOffsetError is returned when attempting to read or write from an
-// invalid offset.
-type InvalidOffsetError struct {
-	Path   string
-	Offset int64
-}
-
-func (err InvalidOffsetError) Error() string {
-	return fmt.Sprintf("Invalid offset: %d for path: %s", err.Offset, err.Path)
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go
deleted file mode 100644
index 0921ccc0..00000000
--- a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift.go
+++ /dev/null
@@ -1,657 +0,0 @@
-// Package swift provides a storagedriver.StorageDriver implementation to
-// store blobs in Openstack Swift object storage.
-//
-// This package leverages the ncw/swift client library for interfacing with
-// Swift.
-//
-// It supports both TempAuth authentication and Keystone authentication
-// (up to version 3).
-//
-// Since Swift has no concept of directories (directories are an abstraction),
-// empty objects are created with the MIME type application/vnd.swift.directory.
-//
-// As Swift has a limit on the size of a single uploaded object (by default
-// this is 5GB), the driver makes use of the Swift Large Object Support
-// (http://docs.openstack.org/developer/swift/overview_large_objects.html).
-// Only one container is used for both manifests and data objects. Manifests
-// are stored in the 'files' pseudo directory, data objects are stored under
-// 'segments'.
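The swift driver's large-object bookkeeping described above boils down to two naming conventions, mirrored here from the getSegment and parseManifest helpers later in this file; a runnable sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// A Dynamic Large Object is a zero-byte manifest whose X-Object-Manifest
// header has the form "<container>/<segment-prefix>"; each segment is named
// "<segment-prefix>/<16-digit part number>" so they concatenate in order.
func segmentName(prefix string, part int) string {
	return fmt.Sprintf("%s/%016d", prefix, part)
}

func parseManifest(manifest string) (container, prefix string) {
	parts := strings.SplitN(manifest, "/", 2)
	container = parts[0]
	if len(parts) > 1 {
		prefix = parts[1]
	}
	return
}

func main() {
	fmt.Println(segmentName("segments/abc", 1))          // segments/abc/0000000000000001
	fmt.Println(parseManifest("registry/segments/abc"))  // registry segments/abc
}
```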
-package swift - -import ( - "bytes" - "crypto/rand" - "crypto/sha1" - "crypto/tls" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - gopath "path" - "strconv" - "strings" - "time" - - "github.com/mitchellh/mapstructure" - "github.com/ncw/swift" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/version" -) - -const driverName = "swift" - -// defaultChunkSize defines the default size of a segment -const defaultChunkSize = 20 * 1024 * 1024 - -// minChunkSize defines the minimum size of a segment -const minChunkSize = 1 << 20 - -// Parameters A struct that encapsulates all of the driver parameters after all values have been set -type Parameters struct { - Username string - Password string - AuthURL string - Tenant string - TenantID string - Domain string - DomainID string - Region string - Container string - Prefix string - InsecureSkipVerify bool - ChunkSize int -} - -type swiftInfo map[string]interface{} - -func init() { - factory.Register(driverName, &swiftDriverFactory{}) -} - -// swiftDriverFactory implements the factory.StorageDriverFactory interface -type swiftDriverFactory struct{} - -func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Conn swift.Connection - Container string - Prefix string - BulkDeleteSupport bool - ChunkSize int -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift -// Objects are stored at absolute keys in the provided container. 
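A sketch of the parameter map that the FromParameters constructor below decodes via mapstructure; the endpoint and credentials are hypothetical:

```go
package main

import "fmt"

func main() {
	// Parameter map shape decoded by the swift FromParameters below;
	// endpoint and credentials here are made up for illustration.
	parameters := map[string]interface{}{
		"username":  "demo",                          // required
		"password":  "secret",                        // required
		"authurl":   "https://keystone.example/v2.0", // required
		"container": "registry",                      // required
		"region":    "RegionOne",                     // optional
		"chunksize": 32 * 1024 * 1024,                // optional, >= 1 MiB
	}
	fmt.Println(parameters)
	// driver, err := swift.FromParameters(parameters)
}
```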
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - username -// - password -// - authurl -// - container -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - params := Parameters{ - ChunkSize: defaultChunkSize, - InsecureSkipVerify: false, - } - - if err := mapstructure.Decode(parameters, ¶ms); err != nil { - return nil, err - } - - if params.Username == "" { - return nil, fmt.Errorf("No username parameter provided") - } - - if params.Password == "" { - return nil, fmt.Errorf("No password parameter provided") - } - - if params.AuthURL == "" { - return nil, fmt.Errorf("No authurl parameter provided") - } - - if params.Container == "" { - return nil, fmt.Errorf("No container parameter provided") - } - - if params.ChunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) - } - - return New(params) -} - -// New constructs a new Driver with the given Openstack Swift credentials and container name -func New(params Parameters) (*Driver, error) { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - MaxIdleConnsPerHost: 2048, - TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, - } - - ct := swift.Connection{ - UserName: params.Username, - ApiKey: params.Password, - AuthUrl: params.AuthURL, - Region: params.Region, - UserAgent: "distribution/" + version.Version, - Tenant: params.Tenant, - TenantId: params.TenantID, - Domain: params.Domain, - DomainId: params.DomainID, - Transport: transport, - ConnectTimeout: 60 * time.Second, - Timeout: 15 * 60 * time.Second, - } - err := ct.Authenticate() - if err != nil { - return nil, fmt.Errorf("Swift authentication failed: %s", err) - } - - if err := ct.ContainerCreate(params.Container, nil); err != nil { - return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) - } - - d := &driver{ - Conn: ct, - Container: params.Container, - Prefix: params.Prefix, - BulkDeleteSupport: detectBulkDelete(params.AuthURL), - ChunkSize: params.ChunkSize, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
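Both range-based ReadStream implementations in this patch (s3 and swift) share a convention: request "bytes=<offset>-" and, when the backend reports the range as unsatisfiable, hand back an empty reader rather than an error. A minimal sketch of that decision, with the status code standing in for s3's "InvalidRange" and swift's 416 response:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// bodyForRange stands in for the drivers' error handling: an unsatisfiable
// range (offset == object size) yields an empty reader instead of an error.
func bodyForRange(status int, body io.ReadCloser) (io.ReadCloser, error) {
	if status == http.StatusRequestedRangeNotSatisfiable {
		return ioutil.NopCloser(bytes.NewReader(nil)), nil
	}
	return body, nil
}

func main() {
	rc, _ := bodyForRange(http.StatusRequestedRangeNotSatisfiable, nil)
	n, _ := io.Copy(ioutil.Discard, rc)
	fmt.Println(n) // 0
}
```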
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
-	headers := make(swift.Headers)
-	headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-"
-
-	file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers)
-	if err == swift.ObjectNotFound {
-		return nil, storagedriver.PathNotFoundError{Path: path}
-	}
-	if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable {
-		return ioutil.NopCloser(bytes.NewReader(nil)), nil
-	}
-	return file, err
-}
-
-// WriteStream stores the contents of the provided io.Reader at a
-// location designated by the given path. The driver will know it has
-// received the full contents when the reader returns io.EOF. The number
-// of successfully READ bytes will be returned, even if an error is
-// returned. May be used to resume writing a stream by providing a nonzero
-// offset. Offsets past the current size will write from the position
-// beyond the end of the file.
-func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) {
-	var (
-		segments      []swift.Object
-		multi         io.Reader
-		paddingReader io.Reader
-		currentLength int64
-		cursor        int64
-		segmentPath   string
-	)
-
-	partNumber := 1
-	chunkSize := int64(d.ChunkSize)
-	zeroBuf := make([]byte, d.ChunkSize)
-
-	getSegment := func() string {
-		return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
-	}
-
-	max := func(a int64, b int64) int64 {
-		if a > b {
-			return a
-		}
-		return b
-	}
-
-	createManifest := true
-	info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path))
-	if err == nil {
-		manifest, ok := headers["X-Object-Manifest"]
-		if !ok {
-			if segmentPath, err = d.swiftSegmentPath(path); err != nil {
-				return 0, err
-			}
-			if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil {
-				return 0, err
-			}
-			segments = append(segments, info)
-		} else {
-			_, segmentPath = parseManifest(manifest)
-			if segments, err = d.getAllSegments(segmentPath); err != nil {
-				return 0, err
-			}
-			createManifest = false
-		}
-		currentLength = info.Bytes
-	} else if err == swift.ObjectNotFound {
-		if segmentPath, err = d.swiftSegmentPath(path); err != nil {
-			return 0, err
-		}
-	} else {
-		return 0, err
-	}
-
-	if createManifest {
-		if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil {
-			return 0, err
-		}
-	}
-
-	// First, we skip the existing segments that are not modified by this call
-	for i := range segments {
-		if offset < cursor+segments[i].Bytes {
-			break
-		}
-		cursor += segments[i].Bytes
-		partNumber++
-	}
-
-	// We reached the end of the file but we haven't reached 'offset' yet
-	// Therefore we add blocks of zeros
-	if offset >= currentLength {
-		for offset-currentLength >= chunkSize {
-			// Insert a block of zeros
-			_, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil)
-			if err != nil {
-				if err == swift.ObjectNotFound {
-					return 0, storagedriver.PathNotFoundError{Path: getSegment()}
-				}
-				return 0, err
-			}
-			currentLength += chunkSize
-			partNumber++
-		}
-
-		cursor = currentLength
-		paddingReader = bytes.NewReader(zeroBuf)
-	} else if offset-cursor > 0 {
-		// Offset is inside the current segment : we need to read the
-		// data from the beginning of the segment to offset
-		file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil)
-		if err != nil {
-			if err == swift.ObjectNotFound {
-				return 0,
storagedriver.PathNotFoundError{Path: getSegment()} - } - return 0, err - } - defer file.Close() - paddingReader = file - } - - readers := []io.Reader{} - if paddingReader != nil { - readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) - } - readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) - multi = io.MultiReader(readers...) - - writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { - currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} - } - return false, bytesRead, err - } - - n, err := io.Copy(currentSegment, multi) - if err != nil { - return false, bytesRead, err - } - - if n > 0 { - defer currentSegment.Close() - bytesRead += n - max(0, offset-cursor) - } - - if n < chunkSize { - // We wrote all the data - if cursor+n < currentLength { - // Copy the end of the chunk - headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) - file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - _, copyErr := io.Copy(currentSegment, file) - - if err := file.Close(); err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - if copyErr != nil { - return false, bytesRead, copyErr - } - } - - return true, bytesRead, nil - } - - multi = io.LimitReader(reader, chunkSize) - cursor += chunkSize - partNumber++ - - return false, bytesRead, nil - } - - finished := false - read := int64(0) - bytesRead := int64(0) - for finished == false { - finished, read, err = writeSegment(getSegment()) - bytesRead += read - if err != nil { - return bytesRead, err - } - } - - return bytesRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - swiftPath := d.swiftPath(path) - opts := &swift.ObjectsOpts{ - Prefix: swiftPath, - Delimiter: '/', - } - - objects, err := d.Conn.ObjectsAll(d.Container, opts) - if err != nil { - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), - } - - for _, obj := range objects { - if obj.PseudoDirectory && obj.Name == swiftPath+"/" { - fi.IsDir = true - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil - } else if obj.Name == swiftPath { - // On Swift 1.12, the 'bytes' field is always 0 - // so we need to do a second HEAD request - info, _, err := d.Conn.Object(d.Container, swiftPath) - if err != nil { - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - fi.IsDir = false - fi.Size = info.Bytes - fi.ModTime = info.LastModified - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil - } - } - - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// List returns a list of the objects that are direct descendants of the given path. 
-func (d *driver) List(ctx context.Context, path string) ([]string, error) { - var files []string - - prefix := d.swiftPath(path) - if prefix != "" { - prefix += "/" - } - - opts := &swift.ObjectsOpts{ - Prefix: prefix, - Delimiter: '/', - } - - objects, err := d.Conn.ObjectsAll(d.Container, opts) - for _, obj := range objects { - files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) - } - - if err == swift.ContainerNotFound { - return files, storagedriver.PathNotFoundError{Path: path} - } - return files, err -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) - if err == nil { - if manifest, ok := headers["X-Object-Manifest"]; ok { - if err = d.createManifest(destPath, manifest); err != nil { - return err - } - err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) - } else { - err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) - } - } - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: sourcePath} - } - return err -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - opts := swift.ObjectsOpts{ - Prefix: d.swiftPath(path) + "/", - } - - objects, err := d.Conn.ObjectsAll(d.Container, &opts) - if err != nil { - if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - - if d.BulkDeleteSupport { - filenames := make([]string, len(objects)) - for i, obj := range objects { - filenames[i] = obj.Name - } - if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden { - if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - } - - for _, obj := range objects { - if obj.PseudoDirectory { - continue - } - if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { - manifest, ok := headers["X-Object-Manifest"] - if ok { - segContainer, prefix := parseManifest(manifest) - segments, err := d.getAllSegments(prefix) - if err != nil { - return err - } - - for _, s := range segments { - if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: s.Name} - } - return err - } - } - } - } else { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: obj.Name} - } - return err - } - - if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: obj.Name} - } - return err - } - } - - _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) - if err == nil { - if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - } else if err == swift.ObjectNotFound { - if len(objects) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - } else { - return err - } - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod -} - -func (d *driver) swiftPath(path string) string { - return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") -} - -func (d *driver) swiftSegmentPath(path string) (string, error) { - checksum := sha1.New() - random := make([]byte, 32) - if _, err := rand.Read(random); err != nil { - return "", err - } - path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) - return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -func (d *driver) getAllSegments(path string) ([]swift.Object, error) { - segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return segments, err -} - -func (d *driver) createManifest(path string, segments string) error { - headers := make(swift.Headers) - headers["X-Object-Manifest"] = segments - manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) - if err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - if err := manifest.Close(); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - return nil -} - -func detectBulkDelete(authURL string) (bulkDelete bool) { - resp, err := http.Get(gopath.Join(authURL, "..", "..") + "/info") - if err == nil { - defer resp.Body.Close() - decoder := json.NewDecoder(resp.Body) - var infos swiftInfo - if decoder.Decode(&infos) == nil { - _, bulkDelete = infos["bulk_delete"] - } - } - return -} - -func parseManifest(manifest string) (container string, prefix string) { - components := strings.SplitN(manifest, "/", 2) - container = components[0] - if len(components) > 1 { - prefix = components[1] - } - return container, prefix -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go b/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go deleted file mode 100644 index 6be2238a..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package swift - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/ncw/swift/swifttest" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
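// A hedged sketch of gating a driver's conformance suite with a custom
// SkipCheck instead of testsuites.NeverSkip (the environment check is
// illustrative; compare the init function below, which builds a real
// constructor):
func registerGatedSuite(ctor testsuites.DriverConstructor) {
	skip := func() (reason string) {
		if os.Getenv("SWIFT_AUTH_URL") == "" {
			return "swift endpoint not configured; skipping driver suite"
		}
		return ""
	}
	testsuites.RegisterSuite(ctor, skip)
}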
-func Test(t *testing.T) { check.TestingT(t) } - -var swiftDriverConstructor func(prefix string) (*Driver, error) - -func init() { - var ( - username string - password string - authURL string - tenant string - tenantID string - domain string - domainID string - container string - region string - insecureSkipVerify bool - swiftServer *swifttest.SwiftServer - err error - ) - username = os.Getenv("SWIFT_USERNAME") - password = os.Getenv("SWIFT_PASSWORD") - authURL = os.Getenv("SWIFT_AUTH_URL") - tenant = os.Getenv("SWIFT_TENANT_NAME") - tenantID = os.Getenv("SWIFT_TENANT_ID") - domain = os.Getenv("SWIFT_DOMAIN_NAME") - domainID = os.Getenv("SWIFT_DOMAIN_ID") - container = os.Getenv("SWIFT_CONTAINER_NAME") - region = os.Getenv("SWIFT_REGION_NAME") - insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) - - if username == "" || password == "" || authURL == "" || container == "" { - if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { - panic(err) - } - username = "swifttest" - password = "swifttest" - authURL = swiftServer.AuthURL - container = "test" - } - - prefix, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(prefix) - - swiftDriverConstructor = func(root string) (*Driver, error) { - parameters := Parameters{ - username, - password, - authURL, - tenant, - tenantID, - domain, - domainID, - region, - container, - root, - insecureSkipVerify, - defaultChunkSize, - } - - return New(parameters) - } - - driverConstructor := func() (storagedriver.StorageDriver, error) { - return swiftDriverConstructor(prefix) - } - - testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) -} - -func TestEmptyRootList(t *testing.T) { - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := swiftDriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := swiftDriverConstructor("") - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := swiftDriverConstructor("/") - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go deleted file mode 100644 index 770c428c..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go +++ /dev/null @@ -1,1163 +0,0 @@ -package testsuites - -import ( - "bytes" - "crypto/sha1" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path" - "sort" - "sync" - 
"testing" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "gopkg.in/check.v1" -) - -// Test hooks up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -// RegisterSuite registers an in-process storage driver test suite with -// the go test runner. -func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { - check.Suite(&DriverSuite{ - Constructor: driverConstructor, - SkipCheck: skipCheck, - ctx: context.Background(), - }) -} - -// SkipCheck is a function used to determine if a test suite should be skipped. -// If a SkipCheck returns a non-empty skip reason, the suite is skipped with -// the given reason. -type SkipCheck func() (reason string) - -// NeverSkip is a default SkipCheck which never skips the suite. -var NeverSkip SkipCheck = func() string { return "" } - -// DriverConstructor is a function which returns a new -// storagedriver.StorageDriver. -type DriverConstructor func() (storagedriver.StorageDriver, error) - -// DriverTeardown is a function which cleans up a suite's -// storagedriver.StorageDriver. -type DriverTeardown func() error - -// DriverSuite is a gocheck test suite designed to test a -// storagedriver.StorageDriver. The intended way to create a DriverSuite is -// with RegisterSuite. -type DriverSuite struct { - Constructor DriverConstructor - Teardown DriverTeardown - SkipCheck - storagedriver.StorageDriver - ctx context.Context -} - -// SetUpSuite sets up the gocheck test suite. -func (suite *DriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } - d, err := suite.Constructor() - c.Assert(err, check.IsNil) - suite.StorageDriver = d -} - -// TearDownSuite tears down the gocheck test suite. -func (suite *DriverSuite) TearDownSuite(c *check.C) { - if suite.Teardown != nil { - err := suite.Teardown() - c.Assert(err, check.IsNil) - } -} - -// TearDownTest tears down the gocheck test. -// This causes the suite to abort if any files are left around in the storage -// driver. -func (suite *DriverSuite) TearDownTest(c *check.C) { - files, _ := suite.StorageDriver.List(suite.ctx, "/") - if len(files) > 0 { - c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) - } -} - -// TestValidPaths checks that various valid file paths are accepted by the -// storage driver. -func (suite *DriverSuite) TestValidPaths(c *check.C) { - contents := randomContents(64) - validFiles := []string{ - "/a", - "/2", - "/aa", - "/a.a", - "/0-9/abcdefg", - "/abcdefg/z.75", - "/abc/1.2.3.4.5-6_zyx/123.z/4", - "/docker/docker-registry", - "/123.abc", - "/abc./abc", - "/.abc", - "/a--b", - "/a-.b", - "/_.abc", - "/Docker/docker-registry", - "/Abc/Cba"} - - for _, filename := range validFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - } -} - -// TestInvalidPaths checks that various invalid file paths are rejected by the -// storage driver. 
-func (suite *DriverSuite) TestInvalidPaths(c *check.C) { - contents := randomContents(64) - invalidFiles := []string{ - "", - "/", - "abc", - "123.abc", - "//bcd", - "/abc_123/"} - - for _, filename := range invalidFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - } -} - -// TestWriteRead1 tests a simple write-read workflow. -func (suite *DriverSuite) TestWriteRead1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead2 tests a simple write-read workflow with unicode data. -func (suite *DriverSuite) TestWriteRead2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead3 tests a simple write-read workflow with a small string. -func (suite *DriverSuite) TestWriteRead3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead4 tests a simple write-read workflow with 1MB of data. -func (suite *DriverSuite) TestWriteRead4(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage -// driver safely. -func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { - filename := randomPath(32) - contents := []byte{0x80, 0x80, 0x80, 0x80} - suite.writeReadCompare(c, filename, contents) -} - -// TestTruncate tests that putting smaller contents than an original file does -// remove the excess contents. -func (suite *DriverSuite) TestTruncate(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) - - contents = randomContents(1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestReadNonexistent tests reading content from an empty path. -func (suite *DriverSuite) TestReadNonexistent(c *check.C) { - filename := randomPath(32) - _, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) -} - -// TestWriteReadStreams1 tests a simple write-read streaming workflow. -func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams2 tests a simple write-read streaming workflow with -// unicode data. -func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams3 tests a simple write-read streaming workflow with a -// small amount of data. -func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB -// of data. 
-func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(1024 * 1024)
-	suite.writeReadCompareStreams(c, filename, contents)
-}
-
-// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the
-// storage driver safely.
-func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) {
-	filename := randomPath(32)
-	contents := []byte{0x80, 0x80, 0x80, 0x80}
-	suite.writeReadCompareStreams(c, filename, contents)
-}
-
-// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage
-// driver safely.
-func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
-	if testing.Short() {
-		c.Skip("Skipping test in short mode")
-	}
-
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
-
-	checksum := sha1.New()
-	var fileSize int64 = 5 * 1024 * 1024 * 1024
-
-	contents := newRandReader(fileSize)
-	written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum))
-	c.Assert(err, check.IsNil)
-	c.Assert(written, check.Equals, fileSize)
-
-	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	writtenChecksum := sha1.New()
-	io.Copy(writtenChecksum, reader)
-
-	c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil))
-}
-
-// TestReadStreamWithOffset tests that the appropriate data is streamed when
-// reading with a given offset.
-func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
-
-	chunkSize := int64(32)
-
-	contentsChunk1 := randomContents(chunkSize)
-	contentsChunk2 := randomContents(chunkSize)
-	contentsChunk3 := randomContents(chunkSize)
-
-	err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
-	c.Assert(err, check.IsNil)
-
-	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err := ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-
-	c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
-
-	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err = ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-
-	c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...))
-
-	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	readContents, err = ioutil.ReadAll(reader)
-	c.Assert(err, check.IsNil)
-	c.Assert(readContents, check.DeepEquals, contentsChunk3)
-
-	// Ensure we get an invalid offset error for negative offsets.
-	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1)
-	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
-	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
-	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
-	c.Assert(reader, check.IsNil)
-
-	// Read past the end of the content and make sure we get a reader that
-	// returns 0 bytes and io.EOF
-	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	buf := make([]byte, chunkSize)
-	n, err := reader.Read(buf)
-	c.Assert(err, check.Equals, io.EOF)
-	c.Assert(n, check.Equals, 0)
-
-	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
-	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1)
-	c.Assert(err, check.IsNil)
-	defer reader.Close()
-
-	n, err = reader.Read(buf)
-	c.Assert(n, check.Equals, 1)
-
-	// We don't care whether the io.EOF comes on this read or the first
-	// zero read, but the only error acceptable here is io.EOF.
-	if err != nil {
-		c.Assert(err, check.Equals, io.EOF)
-	}
-
-	// Any more reads should result in zero bytes and io.EOF
-	n, err = reader.Read(buf)
-	c.Assert(n, check.Equals, 0)
-	c.Assert(err, check.Equals, io.EOF)
-}
-
-// TestContinueStreamAppendLarge tests that a stream write can be appended to without
-// corrupting the data with a large chunk size.
-func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) {
-	suite.testContinueStreamAppend(c, int64(10*1024*1024))
-}
-
-// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only
-// with a tiny chunk size in order to test corner cases for some cloud storage drivers.
-func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) {
-	suite.testContinueStreamAppend(c, int64(32))
-}
-
-func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) {
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
-
-	contentsChunk1 := randomContents(chunkSize)
-	contentsChunk2 := randomContents(chunkSize)
-	contentsChunk3 := randomContents(chunkSize)
-	contentsChunk4 := randomContents(chunkSize)
-	zeroChunk := make([]byte, int64(chunkSize))
-
-	fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)
- - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk1))) - - fi, err := suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) - - // Test re-writing the last chunk - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, fullContents) - - // Writing past size of file extends file (no offset error). We would like - // to write chunk 4 one chunk length past chunk 3. It should be successful - // and the resulting file will be 5 chunks long, with a chunk of all - // zeros. - - fullContents = append(fullContents, zeroChunk...) - fullContents = append(fullContents, contentsChunk4...) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, chunkSize) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) - - received, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(len(received), check.Equals, len(fullContents)) - c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) - c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4) - c.Assert(received, check.DeepEquals, fullContents) - - // Ensure that negative offsets return correct error. - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) - c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) - c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) -} - -// TestReadNonexistentStream tests that reading a stream for a nonexistent path -// fails. 
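// Before the nonexistent-path cases, a short sketch of the gap-fill
// guarantee exercised above: writing at an offset beyond the current end
// must extend the file, with the driver materializing zeros for the hole.
// Assumes d, ctx and the bytes package as in the other sketches; the path
// is hypothetical.
func writeSparse(ctx context.Context, d storagedriver.StorageDriver, path string) error {
	if err := d.PutContent(ctx, path, []byte("head")); err != nil {
		return err
	}
	// Skip 4 bytes past the 4-byte "head"; GetContent afterwards yields
	// "head\x00\x00\x00\x00tail".
	_, err := d.WriteStream(ctx, path, 8, bytes.NewReader([]byte("tail")))
	return err
}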
-func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { - filename := randomPath(32) - - _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - - _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) -} - -// TestList checks the returned list of keys after populating a directory tree. -func (suite *DriverSuite) TestList(c *check.C) { - rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) - defer suite.StorageDriver.Delete(suite.ctx, rootDirectory) - - parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles := make([]string, 50) - for i := 0; i < len(childFiles); i++ { - childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles[i] = childFile - err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) - c.Assert(err, check.IsNil) - } - sort.Strings(childFiles) - - keys, err := suite.StorageDriver.List(suite.ctx, "/") - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{rootDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{parentDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) - c.Assert(err, check.IsNil) - - sort.Strings(keys) - c.Assert(keys, check.DeepEquals, childFiles) - - // A few checks to add here (check out #819 for more discussion on this): - // 1. Ensure that all paths are absolute. - // 2. Ensure that listings only include direct children. - // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). -} - -// TestMove checks that a moved object no longer exists at the source path and -// does exist at the destination. -func (suite *DriverSuite) TestMove(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) -} - -// TestMoveOverwrite checks that a moved object no longer exists at the source -// path and overwrites the contents at the destination. 
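// As the Move tests around here pin down: Move removes the source and
// replaces any previous destination content. A hedged sketch of the staging
// pattern those guarantees support (paths are hypothetical):
func commitUpload(ctx context.Context, d storagedriver.StorageDriver, data []byte) error {
	staging, final := "/uploads/tmp-1234", "/files/object"
	if err := d.PutContent(ctx, staging, data); err != nil {
		return err
	}
	return d.Move(ctx, staging, final) // source disappears, destination is overwritten
}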
-func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { - sourcePath := randomPath(32) - destPath := randomPath(32) - sourceContents := randomContents(32) - destContents := randomContents(64) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, sourceContents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) -} - -// TestMoveNonexistent checks that moving a nonexistent key fails and does not -// delete the data at the destination path. -func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) -} - -// TestMoveInvalid provides various checks for invalid moves. -func (suite *DriverSuite) TestMoveInvalid(c *check.C) { - contents := randomContents(32) - - // Create a regular file. - err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) - c.Assert(err, check.IsNil) - defer suite.StorageDriver.Delete(suite.ctx, "/notadir") - - // Now try to move a non-existent file under it. 
- err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") - c.Assert(err, check.NotNil) // non-nil error -} - -// TestDelete checks that the delete operation removes data from the storage -// driver -func (suite *DriverSuite) TestDelete(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) -} - -// TestURLFor checks that the URLFor method functions properly, but only if it -// is implemented -func (suite *DriverSuite) TestURLFor(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) - if err == storagedriver.ErrUnsupportedMethod { - return - } - c.Assert(err, check.IsNil) - - response, err := http.Get(url) - c.Assert(err, check.IsNil) - defer response.Body.Close() - - read, err := ioutil.ReadAll(response.Body) - c.Assert(err, check.IsNil) - c.Assert(read, check.DeepEquals, contents) - - url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) - if err == storagedriver.ErrUnsupportedMethod { - return - } - c.Assert(err, check.IsNil) - - response, err = http.Head(url) - c.Assert(response.StatusCode, check.Equals, 200) - c.Assert(response.ContentLength, check.Equals, int64(32)) -} - -// TestDeleteNonexistent checks that removing a nonexistent key fails. -func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { - filename := randomPath(32) - err := suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) -} - -// TestDeleteFolder checks that deleting a folder removes all child elements. 
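// A hedged sketch of idempotent cleanup against the Delete semantics pinned
// down here: Delete is recursive, and deleting a missing path surfaces
// storagedriver.PathNotFoundError, which a caller may treat as success.
func cleanup(ctx context.Context, d storagedriver.StorageDriver, root string) error {
	err := d.Delete(ctx, root)
	if _, ok := err.(storagedriver.PathNotFoundError); ok {
		return nil // already gone
	}
	return err
}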
-func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
-	dirname := randomPath(32)
-	filename1 := randomPath(32)
-	filename2 := randomPath(32)
-	filename3 := randomPath(32)
-	contents := randomContents(32)
-
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirname))
-
-	err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents)
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1))
-	c.Assert(err, check.IsNil)
-
-	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
-	c.Assert(err, check.IsNil)
-
-	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
-	c.Assert(err, check.IsNil)
-
-	err = suite.StorageDriver.Delete(suite.ctx, dirname)
-	c.Assert(err, check.IsNil)
-
-	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-
-	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-}
-
-// TestStatCall verifies the implementation of the storagedriver's Stat call.
-func (suite *DriverSuite) TestStatCall(c *check.C) {
-	content := randomContents(4096)
-	dirPath := randomPath(32)
-	fileName := randomFilename(32)
-	filePath := path.Join(dirPath, fileName)
-
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirPath))
-
-	// Call on non-existent file/dir, check error.
-	fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-	c.Assert(fi, check.IsNil)
-
-	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
-	c.Assert(err, check.NotNil)
-	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
-	c.Assert(fi, check.IsNil)
-
-	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
-	c.Assert(err, check.IsNil)
-
-	// Call on regular file, check results
-	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Path(), check.Equals, filePath)
-	c.Assert(fi.Size(), check.Equals, int64(len(content)))
-	c.Assert(fi.IsDir(), check.Equals, false)
-	createdTime := fi.ModTime()
-
-	// Sleep and modify the file
-	time.Sleep(time.Second * 10)
-	content = randomContents(4096)
-	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
-	c.Assert(err, check.IsNil)
-	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
-
-	// Check if the modification time is after the creation time.
-	// In case of cloud storage services, storage frontend nodes might have
-	// time drift between them; however, the sleeps before each update should
-	// absorb that drift.
-	modTime := fi.ModTime()
-	if !modTime.After(createdTime) {
-		c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime)
-	}
-
-	// Call on directory (do not check ModTime as dirs don't need to support it)
-	fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath)
-	c.Assert(err, check.IsNil)
-	c.Assert(fi, check.NotNil)
-	c.Assert(fi.Path(), check.Equals, dirPath)
-	c.Assert(fi.Size(), check.Equals, int64(0))
-	c.Assert(fi.IsDir(), check.Equals, true)
-}
-
-// TestPutContentMultipleTimes checks that the storage driver can overwrite
-// content on subsequent puts. It validates that PutContent does not have to
-// work with an offset like WriteStream does, and that it overwrites the file
-// entirely rather than writing only to the [0,len(data)) range of the file.
-func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) {
-	filename := randomPath(32)
-	contents := randomContents(4096)
-
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
-	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
-	c.Assert(err, check.IsNil)
-
-	contents = randomContents(2048) // upload a different, smaller file
-	err = suite.StorageDriver.PutContent(suite.ctx, filename, contents)
-	c.Assert(err, check.IsNil)
-
-	readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename)
-	c.Assert(err, check.IsNil)
-	c.Assert(readContents, check.DeepEquals, contents)
-}
-
-// TestConcurrentStreamReads checks that multiple clients can safely read from
-// the same file simultaneously with various offsets.
-func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) {
-	var filesize int64 = 128 * 1024 * 1024
-
-	if testing.Short() {
-		filesize = 10 * 1024 * 1024
-		c.Log("Reducing file size to 10MB for short mode")
-	}
-
-	filename := randomPath(32)
-	contents := randomContents(filesize)
-
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
-
-	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
-	c.Assert(err, check.IsNil)
-
-	var wg sync.WaitGroup
-
-	readContents := func() {
-		defer wg.Done()
-		offset := rand.Int63n(int64(len(contents)))
-		reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset)
-		c.Assert(err, check.IsNil)
-
-		readContents, err := ioutil.ReadAll(reader)
-		c.Assert(err, check.IsNil)
-		c.Assert(readContents, check.DeepEquals, contents[offset:])
-	}
-
-	wg.Add(10)
-	for i := 0; i < 10; i++ {
-		go readContents()
-	}
-	wg.Wait()
-}
-
-// TestConcurrentFileStreams checks that multiple *os.File objects can be passed
-// in to WriteStream concurrently without hanging.
-func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) {
-	numStreams := 32
-
-	if testing.Short() {
-		numStreams = 8
-		c.Log("Reducing number of streams to 8 for short mode")
-	}
-
-	var wg sync.WaitGroup
-
-	testStream := func(size int64) {
-		defer wg.Done()
-		suite.testFileStreams(c, size)
-	}
-
-	wg.Add(numStreams)
-	for i := numStreams; i > 0; i-- {
-		go testStream(int64(numStreams) * 1024 * 1024)
-	}
-
-	wg.Wait()
-}
-
-// TestEventualConsistency checks that if stat says that a file is a certain size, then
-// you can freely read from the file (this is the only guarantee that the driver needs to provide)
-func (suite *DriverSuite) TestEventualConsistency(c *check.C) {
-	if testing.Short() {
-		c.Skip("Skipping test in short mode")
-	}
-
-	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
-
-	var offset int64
-	var misswrites int
-	var chunkSize int64 = 32
-
-	for i := 0; i < 1024; i++ {
-		contents := randomContents(chunkSize)
-		read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents))
-		c.Assert(err, check.IsNil)
-
-		fi, err := suite.StorageDriver.Stat(suite.ctx, filename)
-		c.Assert(err, check.IsNil)
-
-		// We are most concerned with being able to read data as soon as Stat declares
-		// it is uploaded. This is the strongest guarantee that some drivers (that guarantee
-		// at best eventual consistency) absolutely need to provide.
-		if fi.Size() == offset+chunkSize {
-			reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset)
-			c.Assert(err, check.IsNil)
-
-			readContents, err := ioutil.ReadAll(reader)
-			c.Assert(err, check.IsNil)
-
-			c.Assert(readContents, check.DeepEquals, contents)
-
-			reader.Close()
-			offset += read
-		} else {
-			misswrites++
-		}
-	}
-
-	if misswrites > 0 {
-		c.Logf("There were %d occurrences of a write not being instantly available.", misswrites)
-	}
-
-	c.Assert(misswrites, check.Not(check.Equals), 1024)
-}
-
-// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files
-func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 0)
-}
-
-// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files
-func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 1024)
-}
-
-// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files
-func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 1024*1024)
-}
-
-// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files
-func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) {
-	suite.benchmarkPutGetFiles(c, 1024*1024*1024)
-}
-
-func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
-	c.SetBytes(size)
-	parentDir := randomPath(8)
-	defer func() {
-		c.StopTimer()
-		suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
-	}()
-
-	for i := 0; i < c.N; i++ {
-		filename := path.Join(parentDir, randomPath(32))
-		err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size))
-		c.Assert(err, check.IsNil)
-
-		_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
-		c.Assert(err, check.IsNil)
-	}
-}
-
-// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files
-func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) {
-	suite.benchmarkStreamFiles(c, 0)
-}
-
-// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files
-func (suite *DriverSuite)
BenchmarkStream1KBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024) -} - -// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files -func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024) -} - -// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files -func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024*1024) -} - -func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { - c.SetBytes(size) - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := 0; i < c.N; i++ { - filename := path.Join(parentDir, randomPath(32)) - written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size))) - c.Assert(err, check.IsNil) - c.Assert(written, check.Equals, size) - - rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - rc.Close() - } -} - -// BenchmarkList5Files benchmarks List for 5 small files -func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { - suite.benchmarkListFiles(c, 5) -} - -// BenchmarkList50Files benchmarks List for 50 small files -func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { - suite.benchmarkListFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := int64(0); i < numFiles; i++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - - c.ResetTimer() - for i := 0; i < c.N; i++ { - files, err := suite.StorageDriver.List(suite.ctx, parentDir) - c.Assert(err, check.IsNil) - c.Assert(int64(len(files)), check.Equals, numFiles) - } -} - -// BenchmarkDelete5Files benchmarks Delete for 5 small files -func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 5) -} - -// BenchmarkDelete50Files benchmarks Delete for 50 small files -func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { - for i := 0; i < c.N; i++ { - parentDir := randomPath(8) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - - c.StopTimer() - for j := int64(0); j < numFiles; j++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - c.StartTimer() - - // This is the operation we're benchmarking - err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - c.Assert(err, check.IsNil) - } -} - -func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { - tf, err := ioutil.TempFile("", "tf") - c.Assert(err, check.IsNil) - defer os.Remove(tf.Name()) - defer tf.Close() - - filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - contents := randomContents(size) - - _, err = tf.Write(contents) - c.Assert(err, check.IsNil) - - tf.Sync() - tf.Seek(0, os.SEEK_SET) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, size) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - 
readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contents))) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") -var separatorChars = []byte("._-") - -func randomPath(length int64) string { - path := "/" - for int64(len(path)) < length { - chunkLength := rand.Int63n(length-int64(len(path))) + 1 - chunk := randomFilename(chunkLength) - path += chunk - remaining := length - int64(len(path)) - if remaining == 1 { - path += randomFilename(1) - } else if remaining > 1 { - path += "/" - } - } - return path -} - -func randomFilename(length int64) string { - b := make([]byte, length) - wasSeparator := true - for i := range b { - if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { - b[i] = separatorChars[rand.Intn(len(separatorChars))] - wasSeparator = true - } else { - b[i] = filenameChars[rand.Intn(len(filenameChars))] - wasSeparator = false - } - } - return string(b) -} - -func randomContents(length int64) []byte { - b := make([]byte, length) - for i := range b { - b[i] = byte(rand.Intn(2 << 8)) - } - return b -} - -type randReader struct { - r int64 - m sync.Mutex -} - -func (rr *randReader) Read(p []byte) (n int, err error) { - rr.m.Lock() - defer rr.m.Unlock() - for i := 0; i < len(p) && rr.r > 0; i++ { - p[i] = byte(rand.Intn(255)) - n++ - rr.r-- - } - if rr.r == 0 { - err = io.EOF - } - return -} - -func newRandReader(n int64) *randReader { - return &randReader{r: n} -} - -func firstPart(filePath string) string { - if filePath == "" { - return "/" - } - for { - if filePath[len(filePath)-1] == '/' { - filePath = filePath[:len(filePath)-1] - } - - dir, file := path.Split(filePath) - if dir == "" && file == "" { - return "/" - } - if dir == "/" || dir == "" { - return "/" + file - } - if file == "" { - return dir - } - filePath = dir - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go index 2ba62a95..f01088ba 100644 --- a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -11,6 +11,10 @@ import ( "github.com/docker/distribution/uuid" ) +// linkPathFunc describes a function that can resolve a link based on the +// repository name and digest. 
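// A hedged sketch of the resolution order the new linkPathFns field (below)
// enables: the first function is the canonical location and is used for
// writes, while reads fall back across all of them, treating "not found" as
// a cue to keep looking. Names here are illustrative only.
func resolveLink(name string, dgst digest.Digest, fns []linkPathFunc, read func(string) (digest.Digest, error)) (digest.Digest, error) {
	var lastErr error
	for _, fn := range fns {
		p, err := fn(name, dgst)
		if err != nil {
			return "", err
		}
		if target, err := read(p); err == nil {
			return target, nil
		} else {
			lastErr = err // e.g. driver.PathNotFoundError; try the next location
		}
	}
	return "", lastErr
}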
+type linkPathFunc func(name string, dgst digest.Digest) (string, error)
+
 // linkedBlobStore provides a full BlobService that namespaces the blobs to a
 // given repository. Effectively, it manages the links in a given repository
 // that grant access to the global blob store.
@@ -23,11 +27,13 @@ type linkedBlobStore struct {
 	deleteEnabled          bool
 	resumableDigestEnabled bool
 
-	// linkPath allows one to control the repository blob link set to which
-	// the blob store dispatches. This is required because manifest and layer
-	// blobs have not yet been fully merged. At some point, this functionality
-	// should be removed and the blob links folder should be merged.
-	linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error)
+	// linkPathFns specifies one or more path functions allowing one to
+	// control the repository blob link set to which the blob store
+	// dispatches. This is required because manifest and layer blobs have not
+	// yet been fully merged. At some point, this functionality should be
+	// removed and the blob links folder should be merged. The first entry is
+	// treated as the "canonical" link location and will be used for writes.
+	linkPathFns []linkPathFunc
 }
 
 var _ distribution.BlobStore = &linkedBlobStore{}
@@ -98,7 +104,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter
 	uuid := uuid.Generate().String()
 	startedAt := time.Now().UTC()
 
-	path, err := lbs.blobStore.pm.path(uploadDataPathSpec{
+	path, err := pathFor(uploadDataPathSpec{
 		name: lbs.repository.Name(),
 		id:   uuid,
 	})
@@ -107,7 +113,7 @@
 		return nil, err
 	}
 
-	startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{
+	startedAtPath, err := pathFor(uploadStartedAtPathSpec{
 		name: lbs.repository.Name(),
 		id:   uuid,
 	})
@@ -127,7 +133,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
 	context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume")
 
-	startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{
+	startedAtPath, err := pathFor(uploadStartedAtPathSpec{
 		name: lbs.repository.Name(),
 		id:   id,
 	})
@@ -151,7 +157,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
 		return nil, err
 	}
 
-	path, err := lbs.pm.path(uploadDataPathSpec{
+	path, err := pathFor(uploadDataPathSpec{
 		name: lbs.repository.Name(),
 		id:   id,
 	})
@@ -213,13 +219,16 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution
 	// Don't make duplicate links.
 	seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
 
+	// only use the first link
+	linkPathFn := lbs.linkPathFns[0]
+
 	for _, dgst := range dgsts {
 		if _, seen := seenDigests[dgst]; seen {
 			continue
 		}
 		seenDigests[dgst] = struct{}{}
 
-		blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst)
+		blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
 		if err != nil {
 			return err
 		}
@@ -236,33 +245,43 @@ type linkedBlobStatter struct {
 	*blobStore
 	repository distribution.Repository
 
-	// linkPath allows one to control the repository blob link set to which
-	// the blob store dispatches. This is required because manifest and layer
-	// blobs have not yet been fully merged. At some point, this functionality
-	// should be removed and the blob links folder should be merged.
-	linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error)
+	// linkPathFns specifies one or more path functions allowing one to
+	// control the repository blob link set to which the blob store
+	// dispatches. This is required because manifest and layer blobs have not
+	// yet been fully merged. At some point, this functionality should be
+	// removed and the blob links folder should be merged. The first entry is
+	// treated as the "canonical" link location and will be used for writes.
+	linkPathFns []linkPathFunc
 }
 
 var _ distribution.BlobDescriptorService = &linkedBlobStatter{}
 
 func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst)
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
+	var (
+		resolveErr error
+		target     digest.Digest
+	)
+
+	// try the many link path functions until we get success or an error that
+	// is not PathNotFoundError.
+	for _, linkPathFn := range lbs.linkPathFns {
+		var err error
+		target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)
+
+		if err == nil {
+			break // success!
+		}
 
-	target, err := lbs.blobStore.readlink(ctx, blobLinkPath)
-	if err != nil {
 		switch err := err.(type) {
 		case driver.PathNotFoundError:
-			return distribution.Descriptor{}, distribution.ErrBlobUnknown
+			resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error
 		default:
 			return distribution.Descriptor{}, err
 		}
+	}
 
-	// TODO(stevvooe): For backwards compatibility with data in "_layers", we
-	// need to hit layerLinkPath, as well. Or, somehow migrate to the new path
-	// layout.
+	if resolveErr != nil {
+		return distribution.Descriptor{}, resolveErr
 	}
 
 	if target != dgst {
@@ -276,13 +295,38 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
 	return lbs.blobStore.statter.Stat(ctx, target)
 }
 
-func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
-	blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst)
-	if err != nil {
-		return err
+func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
+	// clear any possible existence of a link described in linkPathFns
+	for _, linkPathFn := range lbs.linkPathFns {
+		blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
+		if err != nil {
+			return err
+		}
+
+		err = lbs.blobStore.driver.Delete(ctx, blobLinkPath)
+		if err != nil {
+			switch err := err.(type) {
+			case driver.PathNotFoundError:
+				continue // just ignore this error and continue
+			default:
+				return err
+			}
+		}
 	}
 
-	return lbs.blobStore.driver.Delete(ctx, blobLinkPath)
+	return nil
+}
+
+// resolveWithLinkFunc allows us to read a link to a resource with different
+// linkPathFuncs to let us try a few different paths before returning not
+// found.
+func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) {
+	blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst)
+	if err != nil {
+		return "", err
+	}
+
+	return lbs.blobStore.readlink(ctx, blobLinkPath)
 }
 
 func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
@@ -291,11 +335,11 @@
 }
 
 // blobLinkPath provides the path to the blob link, also known as layers.
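// Pinning down the layout, a sketch whose expected strings match the
// TestLinkPathFuncs table added further below:
func exampleLinkPaths() {
	p, _ := blobLinkPath("foo/bar", "sha256:deadbeaf")
	fmt.Println(p) // /docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf/link

	p, _ = manifestRevisionLinkPath("foo/bar", "sha256:deadbeaf")
	fmt.Println(p) // /docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link
}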
-func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { - return pm.path(layerLinkPathSpec{name: name, digest: dgst}) +func blobLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(layerLinkPathSpec{name: name, digest: dgst}) } // manifestRevisionLinkPath provides the path to the manifest revision link. -func manifestRevisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { - return pm.path(layerLinkPathSpec{name: name, digest: dgst}) +func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) } diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go index c8c19d43..db49aaa4 100644 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/libtrust" ) @@ -35,7 +35,7 @@ func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { return true, nil } -func (ms *manifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { +func (ms *manifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") return ms.revisionStore.get(ms.ctx, dgst) } @@ -50,7 +50,7 @@ func SkipLayerVerification(ms distribution.ManifestService) error { return fmt.Errorf("skip layer verification only valid for manifeststore") } -func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error { +func (ms *manifestStore) Put(manifest *schema1.SignedManifest) error { context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") if err := ms.verifyManifest(ms.ctx, manifest); err != nil { @@ -83,7 +83,7 @@ func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { return ms.tagStore.exists(tag) } -func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { +func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { for _, option := range options { err := option(ms) if err != nil { @@ -104,13 +104,13 @@ func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestSe // perspective of the registry. It ensures that the signature is valid for the // enclosed payload. As a policy, the registry only tries to store valid // content, leaving trust policies of that content up to consumers. 
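// A hedged sketch of the sign/verify round trip this method guards, using
// the schema1 package the migration below switches to (pk would be a
// libtrust.PrivateKey; the repository name and tag are hypothetical):
func signAndVerify(pk libtrust.PrivateKey) error {
	m := schema1.Manifest{
		Versioned: manifest.Versioned{SchemaVersion: 1},
		Name:      "foo/bar",
		Tag:       "latest",
	}
	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		return err
	}
	_, err = schema1.Verify(sm) // checks the enclosed payload's signatures
	return err
}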
-func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *manifest.SignedManifest) error { +func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.SignedManifest) error { var errs distribution.ErrManifestVerification if mnfst.Name != ms.repository.Name() { errs = append(errs, fmt.Errorf("repository name does not match manifest name")) } - if _, err := manifest.Verify(mnfst); err != nil { + if _, err := schema1.Verify(mnfst); err != nil { switch err { case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: errs = append(errs, distribution.ErrManifestUnverified{}) diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go index a4ce9149..30126e4b 100644 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -29,7 +30,10 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repo, err := registry.Repository(ctx, name) if err != nil { @@ -72,7 +76,7 @@ func TestManifestStorage(t *testing.T) { } } - m := manifest.Manifest{ + m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, @@ -91,7 +95,7 @@ func TestManifestStorage(t *testing.T) { dgst := digest.Digest(ds) testLayers[digest.Digest(dgst)] = rs - m.FSLayers = append(m.FSLayers, manifest.FSLayer{ + m.FSLayers = append(m.FSLayers, schema1.FSLayer{ BlobSum: dgst, }) } @@ -101,7 +105,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error generating private key: %v", err) } - sm, merr := manifest.Sign(&m, pk) + sm, merr := schema1.Sign(&m, pk) if merr != nil { t.Fatalf("error signing manifest: %v", err) } @@ -229,7 +233,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error generating private key: %v", err) } - sm2, err := manifest.Sign(&m, pk2) + sm2, err := schema1.Sign(&m, pk2) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } @@ -257,7 +261,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error fetching manifest: %v", err) } - if _, err := manifest.Verify(fetched); err != nil { + if _, err := schema1.Verify(fetched); err != nil { t.Fatalf("unexpected error verifying manifest: %v", err) } @@ -348,7 +352,10 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Deleted manifest get returned non-nil") } - r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) + r, err := NewRegistry(ctx, env.driver, 
BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repo, err := r.Repository(ctx, env.name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } @@ -362,3 +369,37 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Unexpected success deleting while disabled") } } + +// TestLinkPathFuncs ensures that the link path functions' behavior is locked +// down and implemented as expected. +func TestLinkPathFuncs(t *testing.T) { + for _, testcase := range []struct { + repo string + digest digest.Digest + linkPathFn linkPathFunc + expected string + }{ + { + repo: "foo/bar", + digest: "sha256:deadbeaf", + linkPathFn: blobLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf/link", + }, + { + repo: "foo/bar", + digest: "sha256:deadbeaf", + linkPathFn: manifestRevisionLinkPath, + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link", + }, + } { + p, err := testcase.linkPathFn(testcase.repo, testcase.digest) + if err != nil { + t.Fatalf("unexpected error calling linkPathFn(%q, %q): %v", testcase.repo, testcase.digest, err) + } + + if p != testcase.expected { + t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) + } + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go index 35debddf..e90a1993 100644 --- a/vendor/github.com/docker/distribution/registry/storage/paths.go +++ b/vendor/github.com/docker/distribution/registry/storage/paths.go @@ -8,10 +8,18 @@ import ( "github.com/docker/distribution/digest" ) -const storagePathVersion = "v2" +const ( + storagePathVersion = "v2" // fixed storage layout version + storagePathRoot = "/docker/registry/" // all driver paths have a prefix -// pathMapper maps paths based on "object names" and their ids. The "object -// names" mapped by pathMapper are internal to the storage system. + // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought + // storage path root would be configurable for all drivers through this + // package. In reality, we've found it simpler to do this on a per driver + // basis. +) + +// pathFor maps paths based on "object names" and their ids. The "object +// names" mapped by pathFor are internal to the storage system. // // The path layout in the storage backend is roughly as follows: // @@ -37,7 +45,7 @@ const storagePathVersion = "v2" // -> blob/ // // -// The storage backend layout is broken up into a content- addressable blob +// The storage backend layout is broken up into a content-addressable blob // store and repositories. The content-addressable blob store holds most data // throughout the backend, keyed by algorithm and digests of the underlying // content. Access to the blob store is controlled through links from the @@ -98,18 +106,7 @@ const storagePathVersion = "v2" // // For more information on the semantic meaning of each path and their // contents, please see the path spec documentation. -type pathMapper struct { - root string - version string // should be a constant? -} - -var defaultPathMapper = &pathMapper{ - root: "/docker/registry/", - version: storagePathVersion, -} - -// path returns the path identified by spec. -func (pm *pathMapper) path(spec pathSpec) (string, error) { +func pathFor(spec pathSpec) (string, error) { // Switch on the path object type and return the appropriate path.
At // first glance, one may wonder why we don't use an interface to @@ -123,7 +120,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { // to an intermediate path object, that can be consumed and mapped by the // other version. - rootPrefix := []string{pm.root, pm.version} + rootPrefix := []string{storagePathRoot, storagePathVersion} repoPrefix := append(rootPrefix, "repositories") switch v := spec.(type) { @@ -136,7 +133,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil case manifestRevisionLinkPathSpec: - root, err := pm.path(manifestRevisionPathSpec{ + root, err := pathFor(manifestRevisionPathSpec{ name: v.name, revision: v.revision, }) @@ -147,7 +144,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(root, "link"), nil case manifestSignaturesPathSpec: - root, err := pm.path(manifestRevisionPathSpec{ + root, err := pathFor(manifestRevisionPathSpec{ name: v.name, revision: v.revision, }) @@ -158,10 +155,11 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(root, "signatures"), nil case manifestSignatureLinkPathSpec: - root, err := pm.path(manifestSignaturesPathSpec{ + root, err := pathFor(manifestSignaturesPathSpec{ name: v.name, revision: v.revision, }) + if err != nil { return "", err } @@ -175,50 +173,55 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { case manifestTagsPathSpec: return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil case manifestTagPathSpec: - root, err := pm.path(manifestTagsPathSpec{ + root, err := pathFor(manifestTagsPathSpec{ name: v.name, }) + if err != nil { return "", err } return path.Join(root, v.tag), nil case manifestTagCurrentPathSpec: - root, err := pm.path(manifestTagPathSpec{ + root, err := pathFor(manifestTagPathSpec{ name: v.name, tag: v.tag, }) + if err != nil { return "", err } return path.Join(root, "current", "link"), nil case manifestTagIndexPathSpec: - root, err := pm.path(manifestTagPathSpec{ + root, err := pathFor(manifestTagPathSpec{ name: v.name, tag: v.tag, }) + if err != nil { return "", err } return path.Join(root, "index"), nil case manifestTagIndexEntryLinkPathSpec: - root, err := pm.path(manifestTagIndexEntryPathSpec{ + root, err := pathFor(manifestTagIndexEntryPathSpec{ name: v.name, tag: v.tag, revision: v.revision, }) + if err != nil { return "", err } return path.Join(root, "link"), nil case manifestTagIndexEntryPathSpec: - root, err := pm.path(manifestTagIndexPathSpec{ + root, err := pathFor(manifestTagIndexPathSpec{ name: v.name, tag: v.tag, }) + if err != nil { return "", err } diff --git a/vendor/github.com/docker/distribution/registry/storage/paths_test.go b/vendor/github.com/docker/distribution/registry/storage/paths_test.go index 3d17b377..9e91a3fa 100644 --- a/vendor/github.com/docker/distribution/registry/storage/paths_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/paths_test.go @@ -7,10 +7,6 @@ import ( ) func TestPathMapper(t *testing.T) { - pm := &pathMapper{ - root: "/pathmapper-test", - } - for _, testcase := range []struct { spec pathSpec expected string @@ -21,14 +17,14 @@ func TestPathMapper(t *testing.T) { name: "foo/bar", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", }, { spec:
manifestRevisionLinkPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", }, { spec: manifestSignatureLinkPathSpec{ @@ -36,41 +32,41 @@ func TestPathMapper(t *testing.T) { revision: "sha256:abcdef0123456789", signature: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", }, { spec: manifestSignaturesPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", }, { spec: manifestTagsPathSpec{ name: "foo/bar", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags", }, { spec: manifestTagPathSpec{ name: "foo/bar", tag: "thetag", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag", }, { spec: manifestTagCurrentPathSpec{ name: "foo/bar", tag: "thetag", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/current/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link", }, { spec: manifestTagIndexPathSpec{ name: "foo/bar", tag: "thetag", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index", }, { spec: manifestTagIndexEntryPathSpec{ @@ -78,7 +74,7 @@ func TestPathMapper(t *testing.T) { tag: "thetag", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", }, { spec: manifestTagIndexEntryLinkPathSpec{ @@ -86,26 +82,26 @@ func TestPathMapper(t *testing.T) { tag: "thetag", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", }, { spec: layerLinkPathSpec{ name: "foo/bar", digest: "tarsum.v1+test:abcdef", }, - expected: "/pathmapper-test/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", + expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", }, { spec: blobDataPathSpec{ digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), }, - expected: "/pathmapper-test/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", + expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", }, { spec: blobDataPathSpec{ digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), }, - expected: "/pathmapper-test/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", + expected: 
"/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", }, { @@ -113,17 +109,17 @@ func TestPathMapper(t *testing.T) { name: "foo/bar", id: "asdf-asdf-asdf-adsf", }, - expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", }, { spec: uploadStartedAtPathSpec{ name: "foo/bar", id: "asdf-asdf-asdf-adsf", }, - expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", }, } { - p, err := pm.path(testcase.spec) + p, err := pathFor(testcase.spec) if err != nil { t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) } @@ -136,9 +132,10 @@ func TestPathMapper(t *testing.T) { // Add a few test cases to ensure we cover some errors // Specify a path that requires a revision and get a digest validation error. - badpath, err := pm.path(manifestSignaturesPathSpec{ + badpath, err := pathFor(manifestSignaturesPathSpec{ name: "foo/bar", }) + if err == nil { t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) } diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go index c66f8881..7576b189 100644 --- a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -62,10 +62,11 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv uploads := make(map[string]uploadData, 0) inUploadDir := false - root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + root, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return uploads, append(errors, err) } + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go index 18c98af8..3b70f723 100644 --- a/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads_test.go @@ -12,8 +12,6 @@ import ( "github.com/docker/distribution/uuid" ) -var pm = defaultPathMapper - func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { d := inmemory.New() ctx := context.Background() @@ -24,7 +22,7 @@ func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time. 
} func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { - dataPath, err := pm.path(uploadDataPathSpec{name: repo, id: uploadID}) + dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } @@ -32,7 +30,7 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa t.Fatalf("Unable to write data file") } - startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, id: uploadID}) + startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } @@ -115,7 +113,7 @@ func TestPurgeOnlyUploads(t *testing.T) { // Create a directory tree outside _uploads and ensure // these files aren't deleted. - dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) + dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) if err != nil { t.Fatalf(err.Error()) } diff --git a/vendor/github.com/docker/distribution/registry/storage/registry.go b/vendor/github.com/docker/distribution/registry/storage/registry.go index c5058b80..5ef06d53 100644 --- a/vendor/github.com/docker/distribution/registry/storage/registry.go +++ b/vendor/github.com/docker/distribution/registry/storage/registry.go @@ -3,7 +3,7 @@ package storage import ( "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -12,49 +12,90 @@ import ( // package. All instances should descend from this object. type registry struct { blobStore *blobStore - blobServer distribution.BlobServer - statter distribution.BlobStatter // global statter service. + blobServer *blobServer + statter *blobStatter // global statter service. blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider deleteEnabled bool resumableDigestEnabled bool } -// NewRegistryWithDriver creates a new registry instance from the provided -// driver. The resulting registry may be shared by multiple goroutines but is -// cheap to allocate. If redirect is true, the backend blob server will -// attempt to use (StorageDriver).URLFor to serve all blobs. -// -// TODO(stevvooe): This function signature is getting very out of hand. Move to -// functional options for instance configuration. -func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool, redirect bool, isCache bool) distribution.Namespace { - // create global statter, with cache. - var statter distribution.BlobDescriptorService = &blobStatter{ - driver: driver, - pm: defaultPathMapper, - } +// RegistryOption is the type used for functional options for NewRegistry. +type RegistryOption func(*registry) error - if blobDescriptorCacheProvider != nil { - statter = cache.NewCachedBlobStatter(blobDescriptorCacheProvider, statter) +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).URLFor to serve all blobs. +func EnableRedirect(registry *registry) error { + registry.blobServer.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry. It enables deletion on +// the registry. 
+func EnableDelete(registry *registry) error { + registry.deleteEnabled = true + return nil +} + +// DisableDigestResumption is a functional option for NewRegistry. It should be +// used if the registry is acting as a caching proxy. +func DisableDigestResumption(registry *registry) error { + registry.resumableDigestEnabled = false + return nil +} + +// BlobDescriptorCacheProvider returns a functional option for +// NewRegistry. It creates a cached blob statter for use by the +// registry. +func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { + // TODO(aaronl): The duplication of statter across several objects is + // ugly, and prevents us from using interface types in the registry + // struct. Ideally, blobStore and blobServer should be lazily + // initialized, and use the current value of + // blobDescriptorCacheProvider. + return func(registry *registry) error { + if blobDescriptorCacheProvider != nil { + statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) + registry.blobStore.statter = statter + registry.blobServer.statter = statter + registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider + } + return nil + } +} + +// NewRegistry creates a new registry instance from the provided driver. The +// resulting registry may be shared by multiple goroutines but is cheap to +// allocate. If the Redirect option is specified, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { + // create global statter + statter := &blobStatter{ + driver: driver, } bs := &blobStore{ driver: driver, - pm: defaultPathMapper, statter: statter, } - return ®istry{ + registry := ®istry{ blobStore: bs, blobServer: &blobServer{ - driver: driver, - statter: statter, - pathFn: bs.path, - redirect: redirect, + driver: driver, + statter: statter, + pathFn: bs.path, }, - blobDescriptorCacheProvider: blobDescriptorCacheProvider, - deleteEnabled: deleteEnabled, - resumableDigestEnabled: !isCache, + statter: statter, + resumableDigestEnabled: true, } + + for _, option := range options { + if err := option(registry); err != nil { + return nil, err + } + } + + return registry, nil } // Scope returns the namespace scope for a registry. The registry @@ -66,10 +107,10 @@ func (reg *registry) Scope() distribution.Scope { // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) { - if err := v2.ValidateRepositoryName(name); err != nil { +func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { + if _, err := reference.ParseNamed(canonicalName); err != nil { return nil, distribution.ErrRepositoryNameInvalid{ - Name: name, + Name: canonicalName, Reason: err, } } @@ -77,7 +118,7 @@ func (reg *registry) Repository(ctx context.Context, name string) (distribution. 
var descriptorCache distribution.BlobDescriptorService if reg.blobDescriptorCacheProvider != nil { var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(name) + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName) if err != nil { return nil, err } @@ -86,7 +127,7 @@ func (reg *registry) Repository(ctx context.Context, name string) (distribution. return &repository{ ctx: ctx, registry: reg, - name: name, + name: canonicalName, descriptorCache: descriptorCache, }, nil } @@ -108,6 +149,13 @@ func (repo *repository) Name() string { // may be context sensitive in the future. The instance should be used similar // to a request local. func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifestLinkPathFns := []linkPathFunc{ + // NOTE(stevvooe): Need to search through multiple locations since + // 2.1.0 unintentionally linked into _layers. + manifestRevisionLinkPath, + blobLinkPath, + } + ms := &manifestStore{ ctx: ctx, repository: repo, @@ -120,14 +168,15 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M repository: repo, deleteEnabled: repo.registry.deleteEnabled, blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPath: manifestRevisionLinkPath, + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, }, // TODO(stevvooe): linkPath limits this blob store to only // manifests. This instance cannot be used for blob checks. - linkPath: manifestRevisionLinkPath, + linkPathFns: manifestLinkPathFns, + resumableDigestEnabled: repo.resumableDigestEnabled, }, }, tagStore: &tagStore{ @@ -153,9 +202,9 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M // to a request local. func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { var statter distribution.BlobDescriptorService = &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPath: blobLinkPath, + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, } if repo.descriptorCache != nil { @@ -171,8 +220,9 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { // TODO(stevvooe): linkPath limits this blob store to only layers. // This instance cannot be used for manifest checks. - linkPath: blobLinkPath, - deleteEnabled: repo.registry.deleteEnabled, + linkPathFns: []linkPathFunc{blobLinkPath}, + deleteEnabled: repo.registry.deleteEnabled, + resumableDigestEnabled: repo.resumableDigestEnabled, } } diff --git a/vendor/github.com/docker/distribution/registry/storage/revisionstore.go b/vendor/github.com/docker/distribution/registry/storage/revisionstore.go index 9dea78e8..ed2d5dd3 100644 --- a/vendor/github.com/docker/distribution/registry/storage/revisionstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/revisionstore.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/libtrust" ) @@ -18,7 +18,7 @@ type revisionStore struct { } // get retrieves the manifest, keyed by revision digest. 
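Stepping back from the hunks for a moment: the registry.go rewrite above replaces NewRegistryWithDriver's positional booleans with functional options. A minimal construction sketch, mirroring the test setup earlier in this patch (the in-memory driver and cache provider are simply the lightest stand-ins):

```go
package main

import (
	"log"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()

	// Options are applied in order by NewRegistry; omitting one keeps the
	// default (redirect off, delete off, digest resumption on).
	reg, err := storage.NewRegistry(ctx, inmemory.New(),
		storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()),
		storage.EnableDelete,
		storage.EnableRedirect,
	)
	if err != nil {
		log.Fatalf("error creating registry: %v", err)
	}

	// Repository names are now validated via reference.ParseNamed.
	repo, err := reg.Repository(ctx, "foo/bar")
	if err != nil {
		log.Fatalf("error getting repository: %v", err)
	}
	_ = repo
}
```

New options can be added without touching existing call sites, which is exactly what the old boolean signature could not absorb.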
-func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*manifest.SignedManifest, error) { +func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*schema1.SignedManifest, error) { // Ensure that this revision is available in this repository. _, err := rs.blobStore.Stat(ctx, revision) if err != nil { @@ -64,7 +64,7 @@ func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*mani return nil, err } - var sm manifest.SignedManifest + var sm schema1.SignedManifest if err := json.Unmarshal(raw, &sm); err != nil { return nil, err } @@ -74,7 +74,7 @@ func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*mani // put stores the manifest in the repository, if not already present. Any // updated signatures will be stored, as well. -func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) (distribution.Descriptor, error) { +func (rs *revisionStore) put(ctx context.Context, sm *schema1.SignedManifest) (distribution.Descriptor, error) { // Resolve the payload in the manifest. payload, err := sm.Payload() if err != nil { @@ -82,7 +82,7 @@ func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) ( } // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.Put(ctx, manifest.ManifestMediaType, payload) + revision, err := rs.blobStore.Put(ctx, schema1.ManifestMediaType, payload) if err != nil { context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return distribution.Descriptor{}, err diff --git a/vendor/github.com/docker/distribution/registry/storage/signaturestore.go b/vendor/github.com/docker/distribution/registry/storage/signaturestore.go index 78fd2e6c..f5888f64 100644 --- a/vendor/github.com/docker/distribution/registry/storage/signaturestore.go +++ b/vendor/github.com/docker/distribution/registry/storage/signaturestore.go @@ -26,7 +26,7 @@ func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobSto var _ distribution.SignatureService = &signatureStore{} func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { - signaturesPath, err := s.blobStore.pm.path(manifestSignaturesPathSpec{ + signaturesPath, err := pathFor(manifestSignaturesPathSpec{ name: s.repository.Name(), revision: dgst, }) @@ -119,12 +119,13 @@ func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { // manifest with the given digest. Effectively, each signature link path // layout is a unique linked blob store. 
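The signature store below shows the other half of the pathMapper removal: a linkPathFunc is now just func(name string, dgst digest.Digest) (string, error), so a store that needs extra context, here the manifest revision, captures it in a closure. An in-package sketch of that shape (the wrapper name is illustrative, not from the patch):

```go
// signatureLinkPathFn builds a linkPathFunc that closes over a manifest
// revision, as signatureStore.linkedBlobStore does below.
func signatureLinkPathFn(revision digest.Digest) linkPathFunc {
	return func(name string, dgst digest.Digest) (string, error) {
		return pathFor(manifestSignatureLinkPathSpec{
			name:      name,
			revision:  revision,
			signature: dgst,
		})
	}
}
```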
func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { - linkpath := func(pm *pathMapper, name string, dgst digest.Digest) (string, error) { - return pm.path(manifestSignatureLinkPathSpec{ + linkpath := func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestSignatureLinkPathSpec{ name: name, revision: revision, signature: dgst, }) + } return &linkedBlobStore{ @@ -132,10 +133,10 @@ func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Di repository: s.repository, blobStore: s.blobStore, blobAccessController: &linkedBlobStatter{ - blobStore: s.blobStore, - repository: s.repository, - linkPath: linkpath, + blobStore: s.blobStore, + repository: s.repository, + linkPathFns: []linkPathFunc{linkpath}, }, - linkPath: linkpath, + linkPathFns: []linkPathFunc{linkpath}, } } diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go index a74d9b09..aec95286 100644 --- a/vendor/github.com/docker/distribution/registry/storage/tagstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore.go @@ -18,9 +18,10 @@ type tagStore struct { // tags lists the manifest tags for the specified repository. func (ts *tagStore) tags() ([]string, error) { - p, err := ts.blobStore.pm.path(manifestTagPathSpec{ + p, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name(), }) + if err != nil { return nil, err } @@ -47,10 +48,11 @@ func (ts *tagStore) tags() ([]string, error) { // exists returns true if the specified manifest tag exists in the repository. func (ts *tagStore) exists(tag string) (bool, error) { - tagPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{ + tagPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name(), tag: tag, }) + if err != nil { return false, err } @@ -66,7 +68,7 @@ func (ts *tagStore) exists(tag string) (bool, error) { // tag tags the digest with the given tag, updating the store to point at // the current tag. The digest must point to a manifest. func (ts *tagStore) tag(tag string, revision digest.Digest) error { - currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{ + currentPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name(), tag: tag, }) @@ -87,10 +89,11 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error { // resolve the current revision for name and tag. func (ts *tagStore) resolve(tag string) (digest.Digest, error) { - currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{ + currentPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name(), tag: tag, }) + if err != nil { return "", err } @@ -111,10 +114,11 @@ func (ts *tagStore) resolve(tag string) (digest.Digest, error) { // delete removes the tag from the repository, including the history of all // revisions that have the specified tag. func (ts *tagStore) delete(tag string) error { - tagPath, err := ts.blobStore.pm.path(manifestTagPathSpec{ + tagPath, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name(), tag: tag, }) + if err != nil { return err } @@ -122,7 +126,7 @@ func (ts *tagStore) delete(tag string) error { return ts.blobStore.driver.Delete(ts.ctx, tagPath) } -// namedBlobStore returns the namedBlobStore for the named tag, allowing one +// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one // to index manifest blobs by tag name.
While the tag store doesn't map // precisely to the linked blob store, using this ensures the links are // managed via the same code path. @@ -131,13 +135,13 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob blobStore: ts.blobStore, repository: ts.repository, ctx: ctx, - linkPath: func(pm *pathMapper, name string, dgst digest.Digest) (string, error) { - return pm.path(manifestTagIndexEntryLinkPathSpec{ + linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ name: name, tag: tag, revision: dgst, }) - }, - } + }}, + } } diff --git a/vendor/github.com/docker/distribution/registry/storage/vacuum.go b/vendor/github.com/docker/distribution/registry/storage/vacuum.go index 46b8096b..60d5a2fa 100644 --- a/vendor/github.com/docker/distribution/registry/storage/vacuum.go +++ b/vendor/github.com/docker/distribution/registry/storage/vacuum.go @@ -18,13 +18,11 @@ func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { return Vacuum{ ctx: ctx, driver: driver, - pm: defaultPathMapper, } } // Vacuum removes content from the filesystem type Vacuum struct { - pm *pathMapper driver driver.StorageDriver ctx context.Context } @@ -36,7 +34,7 @@ func (v Vacuum) RemoveBlob(dgst string) error { return err } - blobPath, err := v.pm.path(blobDataPathSpec{digest: d}) + blobPath, err := pathFor(blobDataPathSpec{digest: d}) if err != nil { return err } @@ -52,7 +50,7 @@ func (v Vacuum) RemoveBlob(dgst string) error { // RemoveRepository removes a repository directory from the // filesystem func (v Vacuum) RemoveRepository(repoName string) error { - rootForRepository, err := v.pm.path(repositoriesRootPathSpec{}) + rootForRepository, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return err } diff --git a/vendor/github.com/docker/distribution/registry/storage/walk.go b/vendor/github.com/docker/distribution/registry/storage/walk.go index 8290f167..3d891276 100644 --- a/vendor/github.com/docker/distribution/registry/storage/walk.go +++ b/vendor/github.com/docker/distribution/registry/storage/walk.go @@ -8,7 +8,7 @@ import ( storageDriver "github.com/docker/distribution/registry/storage/driver" ) -// SkipDir is used as a return value from onFileFunc to indicate that +// ErrSkipDir is used as a return value from onFileFunc to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. 
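With the rename above, ErrSkipDir keeps its original contract: returning it from the walk callback prunes the directory without surfacing as an error. A minimal sketch against the in-memory driver (the "_uploads" pruning is illustrative; the purge-uploads walker earlier in this patch filters differently):

```go
package main

import (
	"log"
	"path"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	driver := inmemory.New()

	// Seed a tree with one upload directory and one ordinary file.
	if err := driver.PutContent(ctx, "/repo/_uploads/abc/data", []byte("u")); err != nil {
		log.Fatal(err)
	}
	if err := driver.PutContent(ctx, "/repo/other/file", []byte("f")); err != nil {
		log.Fatal(err)
	}

	// Walk visits everything except the pruned _uploads subtree.
	err := storage.Walk(ctx, driver, "/repo", func(fi storagedriver.FileInfo) error {
		if fi.IsDir() && path.Base(fi.Path()) == "_uploads" {
			return storage.ErrSkipDir
		}
		log.Println(fi.Path())
		return nil
	})
	if err != nil {
		log.Fatalf("walk failed: %v", err)
	}
}
```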
var ErrSkipDir = errors.New("skip this directory") diff --git a/vendor/github.com/docker/distribution/testutil/handler.go b/vendor/github.com/docker/distribution/testutil/handler.go deleted file mode 100644 index 00cd8a6a..00000000 --- a/vendor/github.com/docker/distribution/testutil/handler.go +++ /dev/null @@ -1,148 +0,0 @@ -package testutil - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strings" -) - -// RequestResponseMap is an ordered mapping from Requests to Responses -type RequestResponseMap []RequestResponseMapping - -// RequestResponseMapping defines a Response to be sent in response to a given -// Request -type RequestResponseMapping struct { - Request Request - Response Response -} - -// Request is a simplified http.Request object -type Request struct { - // Method is the http method of the request, for example GET - Method string - - // Route is the http route of this request - Route string - - // QueryParams are the query parameters of this request - QueryParams map[string][]string - - // Body is the byte contents of the http request - Body []byte - - // Headers are the header for this request - Headers http.Header -} - -func (r Request) String() string { - queryString := "" - if len(r.QueryParams) > 0 { - keys := make([]string, 0, len(r.QueryParams)) - queryParts := make([]string, 0, len(r.QueryParams)) - for k := range r.QueryParams { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - for _, val := range r.QueryParams[k] { - queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) - } - } - queryString = "?" + strings.Join(queryParts, "&") - } - var headers []string - if len(r.Headers) > 0 { - var headerKeys []string - for k := range r.Headers { - headerKeys = append(headerKeys, k) - } - sort.Strings(headerKeys) - - for _, k := range headerKeys { - for _, val := range r.Headers[k] { - headers = append(headers, fmt.Sprintf("%s:%s", k, val)) - } - } - - } - return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) -} - -// Response is a simplified http.Response object -type Response struct { - // Statuscode is the http status code of the Response - StatusCode int - - // Headers are the http headers of this Response - Headers http.Header - - // Body is the response body - Body []byte -} - -// testHandler is an http.Handler with a defined mapping from Request to an -// ordered list of Response objects -type testHandler struct { - responseMap map[string][]Response -} - -// NewHandler returns a new test handler that responds to defined requests -// with specified responses -// Each time a Request is received, the next Response is returned in the -// mapping, until no Responses are defined, at which point a 404 is sent back -func NewHandler(requestResponseMap RequestResponseMap) http.Handler { - responseMap := make(map[string][]Response) - for _, mapping := range requestResponseMap { - responses, ok := responseMap[mapping.Request.String()] - if ok { - responseMap[mapping.Request.String()] = append(responses, mapping.Response) - } else { - responseMap[mapping.Request.String()] = []Response{mapping.Response} - } - } - return &testHandler{responseMap: responseMap} -} - -func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - requestBody, _ := ioutil.ReadAll(r.Body) - request := Request{ - Method: r.Method, - Route: r.URL.Path, - QueryParams: r.URL.Query(), - Body: requestBody, - Headers: make(map[string][]string), - } - 
- // Add headers of interest here - for k, v := range r.Header { - if k == "If-None-Match" { - request.Headers[k] = v - } - } - - responses, ok := app.responseMap[request.String()] - - if !ok || len(responses) == 0 { - http.NotFound(w, r) - return - } - - response := responses[0] - app.responseMap[request.String()] = responses[1:] - - responseHeader := w.Header() - for k, v := range response.Headers { - responseHeader[k] = v - } - - w.WriteHeader(response.StatusCode) - - io.Copy(w, bytes.NewReader(response.Body)) -} diff --git a/vendor/github.com/docker/distribution/testutil/tarfile.go b/vendor/github.com/docker/distribution/testutil/tarfile.go deleted file mode 100644 index 08b796f5..00000000 --- a/vendor/github.com/docker/distribution/testutil/tarfile.go +++ /dev/null @@ -1,95 +0,0 @@ -package testutil - -import ( - "archive/tar" - "bytes" - "crypto/rand" - "fmt" - "io" - "io/ioutil" - mrand "math/rand" - "time" - - "github.com/docker/docker/pkg/tarsum" -) - -// CreateRandomTarFile creates a random tarfile, returning it as an -// io.ReadSeeker along with its tarsum. An error is returned if there is a -// problem generating valid content. -func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) { - nFiles := mrand.Intn(10) + 10 - target := &bytes.Buffer{} - wr := tar.NewWriter(target) - - // Perturb this on each iteration of the loop below. - header := &tar.Header{ - Mode: 0644, - ModTime: time.Now(), - Typeflag: tar.TypeReg, - Uname: "randocalrissian", - Gname: "cloudcity", - AccessTime: time.Now(), - ChangeTime: time.Now(), - } - - for fileNumber := 0; fileNumber < nFiles; fileNumber++ { - fileSize := mrand.Int63n(1<<20) + 1<<20 - - header.Name = fmt.Sprint(fileNumber) - header.Size = fileSize - - if err := wr.WriteHeader(header); err != nil { - return nil, "", err - } - - randomData := make([]byte, fileSize) - - // Fill up the buffer with some random data. - n, err := rand.Read(randomData) - - if n != len(randomData) { - return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) - } - - if err != nil { - return nil, "", err - } - - nn, err := io.Copy(wr, bytes.NewReader(randomData)) - if nn != fileSize { - return nil, "", fmt.Errorf("short copy writing random file to tar") - } - - if err != nil { - return nil, "", err - } - - if err := wr.Flush(); err != nil { - return nil, "", err - } - } - - if err := wr.Close(); err != nil { - return nil, "", err - } - - reader := bytes.NewReader(target.Bytes()) - - // A tar builder that supports tarsum inline calculation would be awesome - // here. 
- ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1) - if err != nil { - return nil, "", err - } - - nn, err := io.Copy(ioutil.Discard, ts) - if nn != int64(len(target.Bytes())) { - return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes())) - } - - if err != nil { - return nil, "", err - } - - return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil -} diff --git a/vendor/github.com/docker/distribution/version/print.go b/vendor/github.com/docker/distribution/version/print.go deleted file mode 100644 index a82bce39..00000000 --- a/vendor/github.com/docker/distribution/version/print.go +++ /dev/null @@ -1,26 +0,0 @@ -package version - -import ( - "fmt" - "io" - "os" -) - -// FprintVersion outputs the version string to the writer, in the following -// format, followed by a newline: -// -// -// -// For example, a binary "registry" built from github.com/docker/distribution -// with version "v2.0" would print the following: -// -// registry github.com/docker/distribution v2.0 -// -func FprintVersion(w io.Writer) { - fmt.Fprintln(w, os.Args[0], Package, Version) -} - -// PrintVersion outputs the version information, from Fprint, to stdout. -func PrintVersion() { - FprintVersion(os.Stdout) -} diff --git a/vendor/github.com/docker/distribution/version/version.go b/vendor/github.com/docker/distribution/version/version.go deleted file mode 100644 index 3a542f9b..00000000 --- a/vendor/github.com/docker/distribution/version/version.go +++ /dev/null @@ -1,11 +0,0 @@ -package version - -// Package is the overall, canonical project import path under which the -// package was built. -var Package = "github.com/docker/distribution" - -// Version indicates which version of the binary is running. This is set to -// the latest release tag by hand, always suffixed by "+unknown". During -// build, it will be replaced by the actual version. The value here will be -// used if the registry is run after a go get based install. -var Version = "v2.0.0+unknown" diff --git a/vendor/github.com/docker/distribution/version/version.sh b/vendor/github.com/docker/distribution/version/version.sh deleted file mode 100755 index 53e29ce9..00000000 --- a/vendor/github.com/docker/distribution/version/version.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# This bash script outputs the current, desired content of version.go, using -# git describe. For best effect, pipe this to the target file. Generally, this -# only needs to updated for releases. The actual value of will be replaced -# during build time if the makefile is used. 
- -set -e - -cat <<EOF diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md ++ Add `ancestor=<image>` filter to `docker ps --filter` flag to filter +containers based on their ancestor images (#14570) ++ Add `label=` filter to `docker ps --filter` to filter containers +based on label (#16530) ++ Add `--kernel-memory` flag to `docker run` (#14006) ++ Add `--message` flag to `docker import` allowing to specify an optional +message (#15711) ++ Add `--privileged` flag to `docker exec` (#14113) ++ Add `--stop-signal` flag to `docker run` allowing to replace the container +process stopping signal (#15307) ++ Add a new `unless-stopped` restart policy (#15348) ++ Inspecting an image now returns tags (#13185) ++ Add container size information to `docker inspect` (#15796) ++ Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json` (#17275) +- Remove the deprecated `/container/ps` endpoint from the API (#15972) +- Send and document correct HTTP codes for `/exec/<name>/start` (#16250) +- Share shm and mqueue between containers sharing IPC namespace (#15862) +- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) +- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted +with `ro` option (#14965) +- Improve `rmi` performance (#16890) +- Do not update /etc/hosts for the default bridge network, except for links (#17325) +- Fix conflict with duplicate container names (#17389) +- Fix an issue with incorrect template execution in `docker inspect` (#17284) +- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) + +## Client + ++ Allow `docker import` to import from local files (#11907) + +## Builder + ++ Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different +stop-signal for the container process (#15307) ++ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` +that allows to add build-time environment variables (#15182) +- Improve cache miss performance (#16890) + +## Storage + +- devicemapper: Implement deferred deletion capability (#16381) + +## Networking + ++ `docker network` exits experimental and is part of standard release (#16645) ++ New network top-level concept, with associated subcommands and API (#16645) + WARNING: the API is different from the experimental API ++ Support for multiple isolated/micro-segmented networks (#16645) ++ Built-in multihost networking using VXLAN based overlay driver (#14071) ++ Support for third-party network plugins (#13424) ++ Ability to dynamically connect containers to multiple networks (#16645) ++ Support for user-defined IP address management via pluggable IPAM drivers (#16910) ++ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in nodes discovery (#16229) ++ Add `--cluster-store-opt` for setting up TLS settings (#16644) ++ Add `--dns-opt` to the daemon (#16031) +- DEPRECATE following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, + `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. + Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect + the networking settings of a container per network.
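To make the deprecation concrete: the old top-level fields now live under `NetworkSettings.Networks`, keyed by network name. A hedged sketch of decoding that shape (the struct types here are illustrative stand-ins, not the engine's own API types):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Just enough of the v1.21 inspect payload to show the per-network layout;
// field names match the deprecated top-level ones.
type endpointSettings struct {
	EndpointID string
	Gateway    string
	IPAddress  string
	MacAddress string
}

type networkSettings struct {
	Networks map[string]endpointSettings
}

func main() {
	payload := []byte(`{"Networks": {"bridge": {"EndpointID": "d4d1f9", "Gateway": "172.17.0.1", "IPAddress": "172.17.0.2", "MacAddress": "02:42:ac:11:00:02"}}}`)

	var ns networkSettings
	if err := json.Unmarshal(payload, &ns); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ns.Networks["bridge"].IPAddress) // 172.17.0.2
}
```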
+ +## Volumes + ++ New top-level `volume` subcommand and API (#14242) +- Move API volume driver settings to host-specific config (#15798) +- Print an error message if volume name is not unique (#16009) +- Ensure volumes created from Dockerfiles always use the local volume driver +(#15507) +- DEPRECATE auto-creating missing host paths for bind mounts (#16349) + +## Logging + ++ Add `awslogs` logging driver for Amazon CloudWatch (#15495) ++ Add generic `tag` log option to allow customizing container/image +information passed to driver (e.g. show container names) (#15384) +- Implement the `docker logs` endpoint for the journald driver (#13707) +- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384) + +## Distribution + ++ `docker search` now works with partial names (#16509) +- Push optimization: avoid buffering to file (#15493) +- The daemon will display progress for images that were already being pulled +by another client (#15489) +- Only permissions required for the current action being performed are requested (#) ++ Renaming trust keys (and respective environment variables) from `offline` to +`root` and `tagging` to `repository` (#16894) +- DEPRECATE trust key environment variables +`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and +`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) + +## Security + ++ Add SELinux profiles to the rpm package (#15832) +- Fix various issues with AppArmor profiles provided in the deb package +(#14609) +- Add AppArmor policy that prevents writing to /proc (#15571) + +## 1.8.3 (2015-10-12) + +### Distribution + +- Fix layer IDs lead to local graph poisoning (CVE-2014-8178) +- Fix manifest validation and parsing logic errors allow pull-by-digest validation bypass (CVE-2014-8179) ++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry + +## 1.8.2 (2015-09-10) + +### Distribution + +- Fixes rare edge case of handling GNU LongLink and LongName entries. +- Fix ^C on docker pull. +- Fix docker pull issues on client disconnection. +- Fix issue that caused the daemon to panic when loggers weren't configured properly. +- Fix goroutine leak pulling images from registry V2. + +### Runtime + +- Fix a bug mounting cgroups for docker daemons running inside docker containers. +- Initialize log configuration properly. + +### Client: + +- Handle `-q` flag in `docker ps` properly when there is a default format. + +### Networking + +- Fix several corner cases with netlink. + +### Contrib + +- Fix several issues with bash completion. 
+ +## 1.8.1 (2015-08-12) + +### Distribution + +* Fix a bug where pushing multiple tags would result in invalid images + +## 1.8.0 (2015-08-11) + +### Distribution + ++ Trusted pull, push and build, disabled by default +* Make tar layers deterministic between registries +* Don't allow deleting the image of running containers +* Check if a tag name to load is a valid digest +* Allow one character repository names +* Add a more accurate error description for invalid tag name +* Make build cache ignore mtime + +### Cli + ++ Add support for DOCKER_CONFIG/--config to specify config file dir ++ Add --type flag for docker inspect command ++ Add formatting options to `docker ps` with `--format` ++ Replace `docker -d` with new subcommand `docker daemon` +* Zsh completion updates and improvements +* Add some missing events to bash completion +* Support daemon urls with base paths in `docker -H` +* Validate status= filter to docker ps +* Display when a container is in --net=host in docker ps +* Extend docker inspect to export image metadata related to graph driver +* Restore --default-gateway{,-v6} daemon options +* Add missing unpublished ports in docker ps +* Allow duration strings in `docker events` as --since/--until +* Expose more mounts information in `docker inspect` + +### Runtime + ++ Add new Fluentd logging driver ++ Allow `docker import` to load from local files ++ Add logging driver for GELF via UDP ++ Allow to copy files from host to containers with `docker cp` ++ Promote volume drivers from experimental to master ++ Add rollover options to json-file log driver, and --log-driver-opts flag ++ Add memory swappiness tuning options +* Remove cgroup read-only flag when privileged +* Make /proc, /sys, & /dev readonly for readonly containers +* Add cgroup bind mount by default +* Overlay: Export metadata for container and image in `docker inspect` +* Devicemapper: external device activation +* Devicemapper: Compare uuid of base device on startup +* Remove RC4 from the list of registry cipher suites +* Add syslog-facility option +* LXC execdriver compatibility with recent LXC versions +* Mark LXC execdriver as deprecated (to be removed with the migration to runc) + +### Plugins + +* Separate plugin sockets and specs locations +* Allow TLS connections to plugins + +### Bug fixes + +- Add missing 'Names' field to /containers/json API output +- Make `docker rmi` of dangling images safe while pulling +- Devicemapper: Change default basesize to 100G +- Go Scheduler issue with sync.Mutex and gcc +- Fix issue where Search API endpoint would panic due to empty AuthConfig +- Set image canonical names correctly +- Check dockerinit only if lxc driver is used +- Fix ulimit usage of nproc +- Always attach STDIN if -i,--interactive is specified +- Show error messages when saving container state fails +- Fixed incorrect assumption on --bridge=none treated as disable network +- Check for invalid port specifications in host configuration +- Fix endpoint leave failure for --net=host mode +- Fix goroutine leak in the stats API if the container is not running +- Check for apparmor file before reading it +- Fix DOCKER_TLS_VERIFY being ignored +- Set umask to the default on startup +- Correct the message of pause and unpause a non-running container +- Adjust disallowed CpuShares in container creation +- ZFS: correctly apply selinux context +- Display empty string instead of <nil> when IP opt is nil +- `docker kill` returns error when container is not running +- Fix COPY/ADD quoted/json form +- Fix goroutine leak on logs -f
with no output +- Remove panic in nat package on invalid hostport +- Fix container linking in Fedora 22 +- Fix error caused using default gateways outside of the allocated range +- Format times in inspect command with a template as RFC3339Nano +- Make registry client to accept 2xx and 3xx http status responses as successful +- Fix race issue that caused the daemon to crash with certain layer downloads failed in a specific order. +- Fix error when the docker ps format was not valid. +- Remove redundant ip forward check. +- Fix issue trying to push images to repository mirrors. +- Fix error cleaning up network entrypoints when there is an initialization issue. + ## 1.7.1 (2015-07-14) #### Runtime @@ -144,7 +384,7 @@ #### Notable Features since 1.3.0 + Set key=value labels to the daemon (displayed in `docker info`), applied with new `-label` daemon flag -+ Add support for `ENV` in Dockerfile of the form: ++ Add support for `ENV` in Dockerfile of the form: `ENV name=value name2=value2...` + New Overlayfs Storage Driver + `docker info` now returns an `ID` and `Name` field @@ -606,7 +846,7 @@ - Fix broken images API for version less than 1.7 - Use the right encoding for all API endpoints which return JSON - Move remote api client to api/ -- Queue calls to the API using generic socket wait +- Queue calls to the API using generic socket wait #### Runtime @@ -686,7 +926,7 @@ With the ongoing changes to the networking and execution subsystems of docker te - Do not add hostname when networking is disabled * Return most recent image from the cache by date - Return all errors from docker wait -* Add Content-Type Header "application/json" to GET /version and /info responses +* Add Content-Type Header "application/json" to GET /version and /info responses #### Other @@ -714,7 +954,7 @@ With the ongoing changes to the networking and execution subsystems of docker te #### Runtime - Only get the image's rootfs when we need to calculate the image size -- Correctly handle unmapping UDP ports +- Correctly handle unmapping UDP ports * Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build - Fix login message to say pull instead of push - Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md index 824d1821..4db7e899 100644 --- a/vendor/github.com/docker/docker/CONTRIBUTING.md +++ b/vendor/github.com/docker/docker/CONTRIBUTING.md @@ -25,7 +25,7 @@ The Docker maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com), +[security@docker.com](mailto:security@docker.com). Security reports are greatly appreciated and we will publicly thank you for it. We also like to send gifts—if you're into Docker schwag, make sure to let @@ -42,14 +42,17 @@ and will thank you for it! Check that [our issue database](https://github.com/docker/docker/issues) doesn't already include that problem or suggestion before submitting an issue. If you find a match, add a quick "+1" or "I have this problem too." Doing this -helps prioritize the most common problems and requests. +helps prioritize the most common problems and requests. **DO NOT DO THAT** to +subscribe to the issue unless you have something meaningful to add to the +conversation. 
The best way to subscribe to the issue is by clicking the Subscribe +button at the top right of the page. When reporting issues, please include your host OS (Ubuntu 12.04, Fedora 19, etc). Please include: * The output of `uname -a`. * The output of `docker version`. -* The output of `docker -D info`. +* The output of `docker info`. Please also include the steps required to reproduce the problem if possible and applicable. This information will help us review and fix your issue faster. @@ -163,7 +166,7 @@ However, there might be a way to implement that feature *on top of* Docker. Stack Overflow - Stack Overflow has over 7000K Docker questions listed. We regularly + Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users. @@ -224,7 +227,7 @@ high majority of submissions should have a single commit, so if in doubt: squash down to one. After every commit, [make sure the test suite passes] -((https://docs.docker.com/project/test-and-docs/)). Include documentation +(https://docs.docker.com/project/test-and-docs/). Include documentation changes in the same pull request so that a revert would remove all traces of the feature or fix. diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile index 51b6cf08..43f8f64c 100644 --- a/vendor/github.com/docker/docker/Dockerfile +++ b/vendor/github.com/docker/docker/Dockerfile @@ -40,13 +40,16 @@ RUN apt-get update && apt-get install -y \ createrepo \ curl \ dpkg-sig \ + gcc-mingw-w64 \ git \ iptables \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ + libsystemd-journal-dev \ mercurial \ parallel \ + pkg-config \ python-mock \ python-pip \ python-websocket \ @@ -69,23 +72,11 @@ RUN cd /usr/local/lvm2 \ && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL -# Install lxc -ENV LXC_VERSION 1.1.2 -RUN mkdir -p /usr/src/lxc \ - && curl -sSL https://linuxcontainers.org/downloads/lxc/lxc-${LXC_VERSION}.tar.gz | tar -v -C /usr/src/lxc/ -xz --strip-components=1 -RUN cd /usr/src/lxc \ - && ./configure \ - && make \ - && make install \ - && ldconfig - # Install Go -ENV GO_VERSION 1.4.2 -RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz \ - && mkdir -p /go/bin +ENV GO_VERSION 1.5.1 +RUN curl -sSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar -v -C /usr/local -xz ENV PATH /go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor -RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation ENV DOCKER_CROSSPLATFORMS \ @@ -96,20 +87,12 @@ ENV DOCKER_CROSSPLATFORMS \ # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 -RUN cd /usr/local/go/src \ - && set -x \ - && for platform in $DOCKER_CROSSPLATFORMS; do \ - GOOS=${platform%/*} \ - GOARCH=${platform##*/} \ - ./make.bash --no-clean 2>&1; \ - done # This has been commented out and kept as reference because we don't support compiling with older Go anymore.
# ENV GOFMT_VERSION 1.3.3 # RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt -# Update this sha when we upgrade to go 1.5.0 -ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 # Grab Go's cover tool for dead-simple code coverage testing # Grab Go's vet tool for examining go code to find suspicious constructs # and help prevent errors that the compiler might not catch @@ -118,7 +101,7 @@ RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ && go install -v golang.org/x/tools/cmd/cover \ && go install -v golang.org/x/tools/cmd/vet # Grab Go's lint tool -ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ && go install -v github.com/golang/lint/golint @@ -127,7 +110,7 @@ RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 # Install registry -ENV REGISTRY_COMMIT 2317f721a3d8428215a2b65da4ae85212ed473b4 +ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ @@ -137,7 +120,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install notary server -ENV NOTARY_COMMIT 77bced079e83d80f40c1f0a544b1a8a3b97fb052 +ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ @@ -147,10 +130,11 @@ RUN set -x \ && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 8a87001d09852058f08a807ab6e8491d57ca1e88 +ENV DOCKER_PY_COMMIT 47ab89ec2bd3bddf1221b856ffbaff333edeabb4 RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt # Setup s3cmd config RUN { \ @@ -179,7 +163,7 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image.sh /docker-frozen-images \ - busybox:latest@8c2e06607696bd4afb3d03b687e361cc43cf8ec1a4a725bc96e39f05ba97dd55 \ + busybox:latest@d7057cb020844f245031d27b76cb18af05db1cc3a96a29fa7777af75f5ac91a3 \ hello-world:frozen@91c95931e552b11604fea91c2f537284149ec32fff0f700a4769cfd31d7696ae \ jess/unshare@5c9f6ea50341a2a8eb6677527f2bdedbf331ae894a41714fda770fb130f3314d # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) diff --git a/vendor/github.com/docker/docker/Dockerfile.gccgo b/vendor/github.com/docker/docker/Dockerfile.gccgo new file mode 100644 index 00000000..1392e723 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.gccgo @@ -0,0 +1,62 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. 
+# docker build -t docker -f Dockerfile.gccgo . +# + +FROM gcc:5.2 + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + btrfs-tools \ + build-essential \ + curl \ + git \ + iptables \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + mercurial \ + parallel \ + python-mock \ + python-pip \ + python-websocket \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT 139850f3f3b17357bab5ba3edfb745fb14043764 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple index 12ee7dde..507018b0 100644 --- a/vendor/github.com/docker/docker/Dockerfile.simple +++ b/vendor/github.com/docker/docker/Dockerfile.simple @@ -26,7 +26,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ xz-utils \ \ aufs-tools \ - lxc \ && rm -rf /var/lib/apt/lists/* ENV AUTO_GOPATH 1 diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS index e3bcb581..502cad3a 100644 --- a/vendor/github.com/docker/docker/MAINTAINERS +++ b/vendor/github.com/docker/docker/MAINTAINERS @@ -141,7 +141,7 @@ editor, and thus asking them to `git commit --amend -s` is not the best way forw In this case, maintainers can update the commits based on clause (c) of the DCO. The most trivial way for a contributor to allow the maintainer to do this, is to add a DCO signature in a Pull Requests's comment, or a maintainer can simply note that -the change is sufficiently trivial that it does not substantivly change the existing +the change is sufficiently trivial that it does not substantially change the existing contribution - i.e., a spelling change. When you add someone's DCO, please also add your own to keep a log. @@ -183,16 +183,6 @@ made through a pull request. # be approved by the chief architect. 
"Chief Architect" = "shykes" - # The Chief Operator is responsible for the day-to-day operations of the project including: - # - facilitating communications amongst all the contributors; - # - tracking release schedules; - # - managing the relationship with downstream distributions and upstream dependencies; - # - helping new contributors to get involved and become successful contributors and maintainers - # - # The role is also responsible for managing and measuring the success of the overall project - # and ensuring it is governed properly working in concert with the Docker Governance Advisory Board (DGAB). - "Chief Operator" = "spf13" - [Org.Operators] # The operators make sure the trains run on time. They are responsible for overall operations @@ -264,8 +254,6 @@ made through a pull request. # # It is common for core maintainers to "branch out" to join or start a subsystem. - - people = [ "calavera", "crosbymichael", @@ -278,11 +266,11 @@ made through a pull request. "tibor", "unclejack", "vbatts", + "vdemeester", "vieux", "vishh" ] - [Org.Subsystems] # As the project grows, it gets separated into well-defined subsystems. Each subsystem @@ -309,7 +297,7 @@ made through a pull request. # - If the pull request has some small problems that need to be changed, make # a comment adressing the issues. # - If the changes needed to a PR are small, you can add a "LGTM once the - # following comments are adressed..." this will reduce needless back and + # following comments are addressed..." this will reduce needless back and # forth. # - If the PR only needs a few changes before being merged, any MAINTAINER can # make a replacement PR that incorporates the existing commits and fixes the @@ -336,12 +324,10 @@ made through a pull request. [Org.Subsystems.Documentation] people = [ - "fredlf", "james", "moxiegirl", "thaJeztah", "jamtur01", - "spf13", "sven" ] @@ -502,11 +488,6 @@ made through a pull request. Email = "estesp@linux.vnet.ibm.com" GitHub = "estesp" - [people.fredlf] - Name = "Fred Lifton" - Email = "fred.lifton@docker.com" - GitHub = "fredlf" - [people.icecrime] Name = "Arnaud Porterie" Email = "arnaud@docker.com" @@ -522,6 +503,11 @@ made through a pull request. Email = "josh.hawn@docker.com" Github = "jlhawn" + [people.jnagal] + Name = "Rohit Jnagal" + Email = "jnagal@google.com" + GitHub = "rjnagal" + [people.lk4d4] Name = "Alexander Morozov" Email = "lk4d4@docker.com" @@ -532,6 +518,11 @@ made through a pull request. Email = "mary.anthony@docker.com" GitHub = "moxiegirl" + [people.mpatel] + Name = "Mrunal Patel" + Email = "mpatel@redhat.com" + GitHub = "mrunalp" + [people.nathanmccauley] Name = "Nathan McCauley" Email = "nathan.mccauley@docker.com" @@ -552,11 +543,6 @@ made through a pull request. Email = "solomon@docker.com" GitHub = "shykes" - [people.spf13] - Name = "Steve Francia" - Email = "steve.francia@gmail.com" - GitHub = "spf13" - [people.sven] Name = "Sven Dowideit" Email = "SvenDowideit@home.org.au" @@ -582,37 +568,32 @@ made through a pull request. 
Email = "tibor@docker.com" GitHub = "tiborvass" + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + [people.vbatts] Name = "Vincent Batts" Email = "vbatts@redhat.com" GitHub = "vbatts" + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + [people.vieux] Name = "Victor Vieux" Email = "vieux@docker.com" GitHub = "vieux" - [people.vmarmol] - Name = "Victor Marmol" - Email = "vmarmol@google.com" - GitHub = "vmarmol" - - [people.jnagal] - Name = "Rohit Jnagal" - Email = "jnagal@google.com" - GitHub = "rjnagal" - - [people.mpatel] - Name = "Mrunal Patel" - Email = "mpatel@redhat.com" - GitHub = "mrunalp" - - [people.unclejack] - Name = "Cristian Staretu" - Email = "cristian.staretu@gmail.com" - GitHub = "unclejack" - [people.vishh] Name = "Vishnu Kannan" Email = "vishnuk@google.com" GitHub = "vishh" + + [people.vmarmol] + Name = "Victor Marmol" + Email = "vmarmol@google.com" + GitHub = "vmarmol" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile index a3d3a0c8..c1f58806 100644 --- a/vendor/github.com/docker/docker/Makefile +++ b/vendor/github.com/docker/docker/Makefile @@ -7,8 +7,8 @@ DOCKER_ENVS := \ -e BUILDFLAGS \ -e DOCKER_CLIENTONLY \ -e DOCKER_DEBUG \ - -e DOCKER_EXECDRIVER \ -e DOCKER_EXPERIMENTAL \ + -e DOCKER_REMAP_ROOT \ -e DOCKER_GRAPHDRIVER \ -e DOCKER_STORAGE_OPTS \ -e DOCKER_USERLANDPROXY \ @@ -28,12 +28,17 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" +DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) -DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through. +INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif -# for some docs workarounds (see below in "docs-build" target) -GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" default: binary diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md index 18a396c5..a4d65c32 100644 --- a/vendor/github.com/docker/docker/README.md +++ b/vendor/github.com/docker/docker/README.md @@ -1,19 +1,19 @@ -Docker: the Linux container engine -================================== +Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) +============================ Docker is an open source project to pack, ship and run any application as a lightweight container. Docker containers are both *hardware-agnostic* and *platform-agnostic*. This means they can run anywhere, from your laptop to the largest -EC2 compute instance and everything in between - and they don't require +cloud compute instance and everything in between - and they don't require you to use a particular language, framework or packaging system. 
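Stepping back to the Makefile hunk above: the `INTERACTIVE` check only adds `-t` when stdin is a terminal, because allocating a TTY in a non-interactive session (CI, a pipe) fails. A minimal Go analogue of that `[ -t 0 ]` test, using the `pkg/term` package that this vendored tree already ships (the exact call shape here is a sketch, not taken from the patch):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// Mirror the Makefile: always pass -i, but only attach a TTY (-t)
	// when stdin really is a terminal, so ^C can reach the process.
	flags := []string{"run", "--rm", "-i"}
	if term.IsTerminal(os.Stdin.Fd()) {
		flags = append(flags, "-t")
	}
	fmt.Println(flags)
}
```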
That makes them great building blocks for deploying and scaling web apps, databases, and backend services without depending on a particular stack or provider. Docker began as an open-source implementation of the deployment engine which -powers [dotCloud](https://dotcloud.com), a popular Platform-as-a-Service. +powers [dotCloud](https://www.dotcloud.com), a popular Platform-as-a-Service. It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands of applications and databases. @@ -174,13 +174,14 @@ Under the hood, Docker is built on the following components: capabilities of the Linux kernel * The [Go](https://golang.org) programming language * The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) -* The [Libcontainer Specification](https://github.com/docker/libcontainer/blob/master/SPEC.md) +* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) -Contributing to Docker +Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) ====================== -[![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) -[![Jenkins Build Status](https://jenkins.dockerproject.org/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/job/Docker%20Master/) +| **Master** (Linux) | **Experimental** (linux) | **Windows** | **FreeBSD** | +|------------------|----------------------|---------|---------| +| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | Want to hack on Docker? Awesome! We have [instructions to help you get started contributing code or documentation](https://docs.docker.com/project/who-written-for/). @@ -247,7 +248,7 @@ We are always open to suggestions on process improvements, and are always lookin Stack Overflow - Stack Overflow has over 7000K Docker questions listed. We regularly + Stack Overflow has over 7000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users. @@ -293,3 +294,7 @@ Docker on Mac and Windows If you know of another project underway that should be listed here, please help us keep this list up-to-date by submitting a PR. + +Awesome-Docker +============== +You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. diff --git a/vendor/github.com/docker/docker/ROADMAP.md b/vendor/github.com/docker/docker/ROADMAP.md index 7a3deaa2..4ec0bf0a 100644 --- a/vendor/github.com/docker/docker/ROADMAP.md +++ b/vendor/github.com/docker/docker/ROADMAP.md @@ -130,11 +130,11 @@ we intend to take advantage of it in the Engine. 
## 2.1 Docker exec We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a -*debugging* feature, as well as being strongly dependent on the the Runtime ingredient effort. +*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. ## 2.2 Dockerfile syntax -The Dockerfile syntax as we know it is simple, and has proven succesful in supporting all our +The Dockerfile syntax as we know it is simple, and has proven successful in supporting all our [official images](https://github.com/docker-library/official-images). Although this is *not* a definitive move, we temporarily won't accept more patches to the Dockerfile syntax for several reasons: diff --git a/vendor/github.com/docker/docker/VERSION b/vendor/github.com/docker/docker/VERSION index 0ef074f2..a01185b4 100644 --- a/vendor/github.com/docker/docker/VERSION +++ b/vendor/github.com/docker/docker/VERSION @@ -1 +1 @@ -1.8.0-dev +1.10.0-dev diff --git a/vendor/github.com/docker/docker/api/api_unit_test.go b/vendor/github.com/docker/docker/api/api_unit_test.go deleted file mode 100644 index 678331d3..00000000 --- a/vendor/github.com/docker/docker/api/api_unit_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package api - -import ( - "testing" -) - -func TestJsonContentType(t *testing.T) { - if !MatchesContentType("application/json", "application/json") { - t.Fail() - } - - if !MatchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if MatchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } -} diff --git a/vendor/github.com/docker/docker/api/client/attach.go b/vendor/github.com/docker/docker/api/client/attach.go index 584c53ea..825543d4 100644 --- a/vendor/github.com/docker/docker/api/client/attach.go +++ b/vendor/github.com/docker/docker/api/client/attach.go @@ -17,7 +17,7 @@ import ( // // Usage: docker attach [OPTIONS] CONTAINER func (cli *DockerCli) CmdAttach(args ...string) error { - cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, "Attach to a running container", true) + cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, Cli.DockerCommands["attach"].Description, true) noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process") @@ -41,6 +41,10 @@ func (cli *DockerCli) CmdAttach(args ...string) error { return fmt.Errorf("You cannot attach to a stopped container, start it first") } + if c.State.Paused { + return fmt.Errorf("You cannot attach to a paused container, unpause it first") + } + if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil { return err } diff --git a/vendor/github.com/docker/docker/api/client/build.go b/vendor/github.com/docker/docker/api/client/build.go index a3600c24..44e95fb9 100644 --- a/vendor/github.com/docker/docker/api/client/build.go +++ b/vendor/github.com/docker/docker/api/client/build.go @@ -12,7 +12,6 @@ import ( "net/url" "os" "os/exec" - "path" "path/filepath" "regexp" "runtime" @@ -35,6 +34,7 @@ import ( "github.com/docker/docker/pkg/units" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) @@ -48,8 +48,9 @@ const ( // // Usage: docker build [OPTIONS] PATH | URL | - func (cli *DockerCli) CmdBuild(args ...string) error { - cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, "Build a new image from the source code at PATH", true) - tag := 
cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image") + cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true) + flTags := opts.NewListOpts(validateTag) + cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format") suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") @@ -58,12 +59,15 @@ func (cli *DockerCli) CmdBuild(args ...string) error { dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") - flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period") flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flBuildArg := opts.NewListOpts(opts.ValidateEnv) + cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables") + isolation := cmd.String([]string{"-isolation"}, "", "Container isolation level") ulimits := make(map[string]*ulimit.Ulimit) flUlimits := opts.NewUlimitOpt(&ulimits) @@ -128,13 +132,19 @@ func (cli *DockerCli) CmdBuild(args ...string) error { return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) } - var includes = []string{"."} - - excludes, err := utils.ReadDockerIgnore(path.Join(contextDir, ".dockerignore")) - if err != nil { + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { return err } + var excludes []string + if err == nil { + excludes, err = utils.ReadDockerIgnore(f) + if err != nil { + return err + } + } + if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil { return fmt.Errorf("Error checking context: '%s'.", err) } @@ -146,6 +156,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // removed. The deamon will remove them for us, if needed, after it // parses the Dockerfile. Ignore errors here, as they will have been // caught by ValidateContextDirectory above. 
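The hunk resumes just below with the include list. For orientation, a hedged sketch of how the exclude patterns behave, assuming the vendored `pkg/fileutils` semantics visible in this diff: `Matches` reports whether a path is excluded by the pattern list and honors `!` negations, which is why an ignored Dockerfile can still be shipped to the daemon.

```go
// Hypothetical .dockerignore contents: exclude everything, re-include
// the Dockerfile. The daemon parses the Dockerfile server-side, so it
// must travel with the build context even when the user ignores it.
excludes := []string{"*", "!Dockerfile"}
ignored, err := fileutils.Matches("Dockerfile", excludes)
if err != nil {
	return err
}
// ignored == false: the trailing "!Dockerfile" negation wins.
```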
+ var includes = []string{"."} keepThem1, _ := fileutils.Matches(".dockerignore", excludes) keepThem2, _ := fileutils.Matches(relDockerfile, excludes) if keepThem1 || keepThem2 { @@ -198,24 +209,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error { memorySwap = parsedMemorySwap } } + // Send the build context - v := &url.Values{} - - //Check if the given image name can be resolved - if *tag != "" { - repository, tag := parsers.ParseRepositoryTag(*tag) - if err := registry.ValidateRepositoryName(repository); err != nil { - return err - } - if len(tag) > 0 { - if err := tags.ValidateTagName(tag); err != nil { - return err - } - } + v := url.Values{ + "t": flTags.GetAll(), } - - v.Set("t", *tag) - if *suppressOutput { v.Set("q", "1") } @@ -239,6 +237,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error { v.Set("pull", "1") } + if !runconfig.IsolationLevel.IsDefault(runconfig.IsolationLevel(*isolation)) { + v.Set("isolation", *isolation) + } + v.Set("cpusetcpus", *flCPUSetCpus) v.Set("cpusetmems", *flCPUSetMems) v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10)) @@ -257,6 +259,14 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } v.Set("ulimits", string(ulimitsJSON)) + // collect all the build-time environment variables for the container + buildArgs := runconfig.ConvertKVStringsToMap(flBuildArg.GetAll()) + buildArgsJSON, err := json.Marshal(buildArgs) + if err != nil { + return err + } + v.Set("buildargs", string(buildArgsJSON)) + headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(cli.configFile.AuthConfigs) if err != nil { @@ -307,6 +317,30 @@ func (cli *DockerCli) CmdBuild(args ...string) error { return nil } +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + repository, tag := parsers.ParseRepositoryTag(rawRepo) + if err := registry.ValidateRepositoryName(repository); err != nil { + return "", err + } + + if len(tag) == 0 { + return rawRepo, nil + } + + if err := tags.ValidateTagName(tag); err != nil { + return "", err + } + + return rawRepo, nil +} + +// isUNC returns true if the path is UNC (one starting \\). It always returns +// false on Linux. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} + // getDockerfileRelPath uses the given context directory for a `docker build` // and returns the absolute path to the context directory, the relative path of // the dockerfile in that context directory, and a non-nil error on success. @@ -317,9 +351,16 @@ func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDi // The context dir might be a symbolic link, so follow it to the actual // target directory. - absContextDir, err = filepath.EvalSymlinks(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. 
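The guard described by the FIXME continues directly below, and the same pattern repeats for the Dockerfile path further on. Factored out, it amounts to a conditional symlink resolution; a sketch with a hypothetical helper name, under the assumptions the FIXME itself states:

```go
// evalSymlinksUnlessUNC captures the repeated guard: on Windows,
// filepath.EvalSymlinks is broken for UNC paths (\\host\share), so
// those are passed through unresolved and links under them are not
// followed. isUNC is the helper added earlier in this hunk.
func evalSymlinksUnlessUNC(path string) (string, error) {
	if isUNC(path) {
		return path, nil
	}
	return filepath.EvalSymlinks(path)
}
```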
+ if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) + } } stat, err := os.Lstat(absContextDir) @@ -354,9 +395,16 @@ func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDi } // Evaluate symlinks in the path to the Dockerfile too. - absDockerfile, err = filepath.EvalSymlinks(absDockerfile) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. + if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + } } if _, err := os.Lstat(absDockerfile); err != nil { @@ -462,7 +510,7 @@ func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absCont In: response.Body, Out: out, Formatter: streamformatter.NewStreamFormatter(), - Size: int(response.ContentLength), + Size: response.ContentLength, NewLines: true, ID: "", Action: fmt.Sprintf("Downloading build context from remote url: %s", remoteURL), @@ -547,12 +595,12 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist // Replace the line with a resolved "FROM repo@digest" repo, tag := parsers.ParseRepositoryTag(matches[1]) if tag == "" { - tag = tags.DEFAULTTAG + tag = tags.DefaultTag } repoInfo, err := registry.ParseRepositoryInfo(repo) if err != nil { - return nil, nil, fmt.Errorf("unable to parse repository info: %v", err) + return nil, nil, fmt.Errorf("unable to parse repository info %q: %v", repo, err) } ref := registry.ParseReference(tag) diff --git a/vendor/github.com/docker/docker/api/client/cli.go b/vendor/github.com/docker/docker/api/client/cli.go index 119b7784..834c47a4 100644 --- a/vendor/github.com/docker/docker/api/client/cli.go +++ b/vendor/github.com/docker/docker/api/client/cli.go @@ -10,7 +10,6 @@ import ( "os" "strings" - "github.com/docker/distribution/uuid" "github.com/docker/docker/cli" "github.com/docker/docker/cliconfig" "github.com/docker/docker/opts" @@ -53,7 +52,7 @@ type DockerCli struct { outFd uintptr // isTerminalIn indicates whether the client's STDIN is a TTY isTerminalIn bool - // isTerminalOut dindicates whether the client's STDOUT is a TTY + // isTerminalOut indicates whether the client's STDOUT is a TTY isTerminalOut bool // transport holds the client transport instance. 
 	transport *http.Transport
@@ -100,30 +99,29 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientF
 	cli.init = func() error {
-		// ignore errors from uuid package when running client commands
-		uuid.Loggerf = func(string, ...interface{}) {}
-
 		clientFlags.PostParse()
 		hosts := clientFlags.Common.Hosts
 		switch len(hosts) {
 		case 0:
-			defaultHost := os.Getenv("DOCKER_HOST")
-			if defaultHost == "" {
-				defaultHost = opts.DefaultHost
-			}
-			defaultHost, err := opts.ValidateHost(defaultHost)
-			if err != nil {
-				return err
-			}
-			hosts = []string{defaultHost}
+			hosts = []string{os.Getenv("DOCKER_HOST")}
 		case 1:
 			// only accept one host to talk to
 		default:
 			return errors.New("Please specify only one -H")
 		}
+		defaultHost := opts.DefaultTCPHost
+		if clientFlags.Common.TLSOptions != nil {
+			defaultHost = opts.DefaultTLSHost
+		}
+
+		var e error
+		if hosts[0], e = opts.ParseHost(defaultHost, hosts[0]); e != nil {
+			return e
+		}
+
 		protoAddrParts := strings.SplitN(hosts[0], "://", 2)
 		cli.proto, cli.addr = protoAddrParts[0], protoAddrParts[1]
diff --git a/vendor/github.com/docker/docker/api/client/commit.go b/vendor/github.com/docker/docker/api/client/commit.go
index fe4acd48..6a837881 100644
--- a/vendor/github.com/docker/docker/api/client/commit.go
+++ b/vendor/github.com/docker/docker/api/client/commit.go
@@ -18,7 +18,7 @@ import (
 //
 // Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
 func (cli *DockerCli) CmdCommit(args ...string) error {
-	cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, "Create a new image from a container's changes", true)
+	cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true)
 	flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
 	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
 	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
diff --git a/vendor/github.com/docker/docker/api/client/cp.go b/vendor/github.com/docker/docker/api/client/cp.go
index 99278adf..c055fced 100644
--- a/vendor/github.com/docker/docker/api/client/cp.go
+++ b/vendor/github.com/docker/docker/api/client/cp.go
@@ -15,6 +15,7 @@ import (
 	Cli "github.com/docker/docker/cli"
 	"github.com/docker/docker/pkg/archive"
 	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/system"
 )
 
 type copyDirection int
@@ -42,8 +43,8 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 		"cp",
 		[]string{"CONTAINER:PATH LOCALPATH|-", "LOCALPATH|- CONTAINER:PATH"},
 		strings.Join([]string{
-			"Copy files/folders between a container and your host.\n",
-			"Use '-' as the source to read a tar archive from stdin\n",
+			Cli.DockerCommands["cp"].Description,
+			"\nUse '-' as the source to read a tar archive from stdin\n",
 			"and extract it to a directory destination in a container.\n",
 			"Use '-' as the destination to stream a tar archive of a\n",
 			"container source to stdout.",
@@ -101,7 +102,7 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 // client, a `:` could be part of an absolute Windows path, in which case it
 // is immediately followed by a backslash.
 func splitCpArg(arg string) (container, path string) {
-	if filepath.IsAbs(arg) {
+	if system.IsAbs(arg) {
 		// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
 		return "", arg
 	}
@@ -232,6 +233,20 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
 	// Prepare destination copy info by stat-ing the container path.
dstInfo := archive.CopyInfo{Path: dstPath} dstStat, err := cli.statContainerPath(dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = cli.statContainerPath(dstContainer, linkTarget) + } + // Ignore any error and assume that the parent directory of the destination // path exists, in which case the copy may still succeed. If there is any // type of conflict (e.g., non-directory overwriting an existing directory @@ -242,15 +257,26 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() } - var content io.Reader + var ( + content io.Reader + resolvedDstPath string + ) + if srcPath == "-" { // Use STDIN. content = os.Stdin + resolvedDstPath = dstInfo.Path if !dstInfo.IsDir { return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) } } else { - srcArchive, err := archive.TarResource(srcPath) + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) if err != nil { return err } @@ -262,12 +288,6 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er // it to the specified directory in the container we get the disired // copy behavior. - // Prepare source copy info. - srcInfo, err := archive.CopyInfoStatPath(srcPath, true) - if err != nil { - return err - } - // See comments in the implementation of `archive.PrepareArchiveCopy` // for exactly what goes into deciding how and whether the source // archive needs to be altered for the correct copy behavior when it is @@ -280,12 +300,12 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er } defer preparedArchive.Close() - dstPath = dstDir + resolvedDstPath = dstDir content = preparedArchive } query := make(url.Values, 2) - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(resolvedDstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. 
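Before the request is sent just below, note the rule the symlink handling above implements: an absolute link target is used as-is, while a relative one is resolved against the parent directory of the destination path. An illustrative stdlib-only version (the patch itself uses the platform-aware `system.IsAbs` and `archive.SplitPathDirEntry` helpers; the function name here is invented):

```go
// resolveLinkTarget mirrors the destination-symlink rule in
// copyToContainer: absolute targets stand on their own, relative
// targets are joined to the parent of the symlink's own path.
func resolveLinkTarget(linkPath, target string) string {
	if filepath.IsAbs(target) {
		return target
	}
	return filepath.Join(filepath.Dir(linkPath), target)
}
```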
query.Set("noOverwriteDirNonDir", "true") diff --git a/vendor/github.com/docker/docker/api/client/create.go b/vendor/github.com/docker/docker/api/client/create.go index 76e935eb..9ef0edab 100644 --- a/vendor/github.com/docker/docker/api/client/create.go +++ b/vendor/github.com/docker/docker/api/client/create.go @@ -26,7 +26,7 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { repos, tag := parsers.ParseRepositoryTag(image) // pull only the image tagged 'latest' if no tag was specified if tag == "" { - tag = tags.DEFAULTTAG + tag = tags.DefaultTag } v.Set("fromImage", repos) v.Set("tag", tag) @@ -96,7 +96,7 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc repo, tag := parsers.ParseRepositoryTag(config.Image) if tag == "" { - tag = tags.DEFAULTTAG + tag = tags.DefaultTag } ref := registry.ParseReference(tag) @@ -159,7 +159,7 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc // // Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] func (cli *DockerCli) CmdCreate(args ...string) error { - cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, "Create a new container", true) + cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true) addTrustedFlags(cmd, true) // These are flags not stored in Config/HostConfig diff --git a/vendor/github.com/docker/docker/api/client/diff.go b/vendor/github.com/docker/docker/api/client/diff.go index b955774c..24350a04 100644 --- a/vendor/github.com/docker/docker/api/client/diff.go +++ b/vendor/github.com/docker/docker/api/client/diff.go @@ -18,7 +18,7 @@ import ( // // Usage: docker diff CONTAINER func (cli *DockerCli) CmdDiff(args ...string) error { - cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, "Inspect changes on a container's filesystem", true) + cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true) cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) diff --git a/vendor/github.com/docker/docker/api/client/events.go b/vendor/github.com/docker/docker/api/client/events.go index c0168bdb..435b13e6 100644 --- a/vendor/github.com/docker/docker/api/client/events.go +++ b/vendor/github.com/docker/docker/api/client/events.go @@ -15,7 +15,7 @@ import ( // // Usage: docker events [OPTIONS] func (cli *DockerCli) CmdEvents(args ...string) error { - cmd := Cli.Subcmd("events", nil, "Get real time events from the server", true) + cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true) since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") flFilter := opts.NewListOpts(nil) diff --git a/vendor/github.com/docker/docker/api/client/exec.go b/vendor/github.com/docker/docker/api/client/exec.go index d02c019b..a9c836c9 100644 --- a/vendor/github.com/docker/docker/api/client/exec.go +++ b/vendor/github.com/docker/docker/api/client/exec.go @@ -16,7 +16,7 @@ import ( // // Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] 
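The hijack.go rewrite a little further below drops `promise.Go` in favor of explicit channels and a `select`. Reduced to its shape (with stand-in functions, not names from the patch): the call returns when the output copier finishes, and if stdin hits EOF first it still drains the output copier before returning, so no bytes are lost.

```go
// Sketch of the new teardown logic. The output channel is buffered so
// the sending goroutine never blocks even if nobody ends up reading it.
func wait(copyOutput func() error, copyStdin func()) error {
	receiveStdout := make(chan error, 1)
	stdinDone := make(chan struct{})
	go func() { receiveStdout <- copyOutput() }()
	go func() { copyStdin(); close(stdinDone) }()
	select {
	case err := <-receiveStdout:
		return err
	case <-stdinDone:
		return <-receiveStdout // stdin closed first: drain output
	}
}
```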
func (cli *DockerCli) CmdExec(args ...string) error { - cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, "Run a command in a running container", true) + cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) execConfig, err := runconfig.ParseExec(cmd, args) // just in case the ParseExec does not exit @@ -92,7 +92,7 @@ func (cli *DockerCli) CmdExec(args ...string) error { } } errCh = promise.Go(func() error { - return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig) + return cli.hijackWithContentType("POST", "/exec/"+execID+"/start", "application/json", execConfig.Tty, in, out, stderr, hijacked, execConfig) }) // Acknowledge the hijack before starting diff --git a/vendor/github.com/docker/docker/api/client/export.go b/vendor/github.com/docker/docker/api/client/export.go index 4d35d54b..2763b7b0 100644 --- a/vendor/github.com/docker/docker/api/client/export.go +++ b/vendor/github.com/docker/docker/api/client/export.go @@ -14,7 +14,7 @@ import ( // // Usage: docker export [OPTIONS] CONTAINER func (cli *DockerCli) CmdExport(args ...string) error { - cmd := Cli.Subcmd("export", []string{"CONTAINER"}, "Export the contents of a container's filesystem as a tar archive", true) + cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true) outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") cmd.Require(flag.Exact, 1) diff --git a/vendor/github.com/docker/docker/api/client/hijack.go b/vendor/github.com/docker/docker/api/client/hijack.go index 5853d79b..bf152c6a 100644 --- a/vendor/github.com/docker/docker/api/client/hijack.go +++ b/vendor/github.com/docker/docker/api/client/hijack.go @@ -15,8 +15,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api" - "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" ) @@ -128,6 +127,10 @@ func (cli *DockerCli) dial() (net.Conn, error) { } func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error { + return cli.hijackWithContentType(method, path, "text/plain", setRawTerminal, in, stdout, stderr, started, data) +} + +func (cli *DockerCli) hijackWithContentType(method, path, contentType string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error { defer func() { if started != nil { close(started) @@ -149,13 +152,20 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea req.Header.Set(k, v) } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION+" ("+runtime.GOOS+")") - req.Header.Set("Content-Type", "text/plain") + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")") + req.Header.Set("Content-Type", contentType) req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", "tcp") req.Host = cli.addr dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?") + } + return err + } + // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain // network setups may cause ECONNTIMEOUT, leaving the client in an unknown @@ -165,12 +175,7 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea tcpConn.SetKeepAlive(true) tcpConn.SetKeepAlivePeriod(30 * time.Second) } - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } + clientconn := httputil.NewClientConn(dial, nil) defer clientconn.Close() @@ -184,8 +189,6 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea started <- rwc } - var receiveStdout chan error - var oldState *term.State if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" { @@ -196,19 +199,15 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea defer term.RestoreTerminal(cli.inFd, oldState) } + receiveStdout := make(chan error, 1) if stdout != nil || stderr != nil { - receiveStdout = promise.Go(func() (err error) { + go func() { defer func() { if in != nil { if setRawTerminal && cli.isTerminalIn { term.RestoreTerminal(cli.inFd, oldState) } - // For some reason this Close call blocks on darwin.. - // As the client exists right after, simply discard the close - // until we find a better solution. - if runtime.GOOS != "darwin" { - in.Close() - } + in.Close() } }() @@ -219,11 +218,12 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea _, err = stdcopy.StdCopy(stdout, stderr, br) } logrus.Debugf("[hijack] End of stdout") - return err - }) + receiveStdout <- err + }() } - sendStdin := promise.Go(func() error { + stdinDone := make(chan struct{}) + go func() { if in != nil { io.Copy(rwc, in) logrus.Debugf("[hijack] End of stdin") @@ -236,22 +236,23 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea logrus.Debugf("Couldn't send EOF: %s", err) } } - // Discard errors due to pipe interruption - return nil - }) + close(stdinDone) + }() - if stdout != nil || stderr != nil { - if err := <-receiveStdout; err != nil { + select { + case err := <-receiveStdout: + if err != nil { logrus.Debugf("Error receiveStdout: %s", err) return err } - } - - if !cli.isTerminalIn { - if err := <-sendStdin; err != nil { - logrus.Debugf("Error sendStdin: %s", err) - return err + case <-stdinDone: + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } } } + return nil } diff --git a/vendor/github.com/docker/docker/api/client/history.go b/vendor/github.com/docker/docker/api/client/history.go index 925add66..37472bad 100644 --- a/vendor/github.com/docker/docker/api/client/history.go +++ b/vendor/github.com/docker/docker/api/client/history.go @@ -3,6 +3,8 @@ package client import ( "encoding/json" "fmt" + "strconv" + "strings" "text/tabwriter" "time" @@ -18,7 +20,7 @@ import ( // // Usage: docker history [OPTIONS] IMAGE func (cli *DockerCli) CmdHistory(args ...string) error { - cmd := Cli.Subcmd("history", []string{"IMAGE"}, "Show the history of an image", true) + cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true) human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human 
readable format") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") @@ -39,38 +41,42 @@ func (cli *DockerCli) CmdHistory(args ...string) error { } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + + if *quiet { + for _, entry := range history { + if *noTrunc { + fmt.Fprintf(w, "%s\n", entry.ID) + } else { + fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) + } + } + w.Flush() + return nil } + var imageID string + var createdBy string + var created string + var size string + + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") for _, entry := range history { - if *noTrunc { - fmt.Fprintf(w, entry.ID) + imageID = entry.ID + createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) + if *noTrunc == false { + createdBy = stringutils.Truncate(createdBy, 45) + imageID = stringid.TruncateID(entry.ID) + } + + if *human { + created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" + size = units.HumanSize(float64(entry.Size)) } else { - fmt.Fprintf(w, stringid.TruncateID(entry.ID)) + created = time.Unix(entry.Created, 0).Format(time.RFC3339) + size = strconv.FormatInt(entry.Size, 10) } - if !*quiet { - if *human { - fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0)))) - } else { - fmt.Fprintf(w, "\t%s\t", time.Unix(entry.Created, 0).Format(time.RFC3339)) - } - if *noTrunc { - fmt.Fprintf(w, "%s\t", entry.CreatedBy) - } else { - fmt.Fprintf(w, "%s\t", stringutils.Truncate(entry.CreatedBy, 45)) - } - - if *human { - fmt.Fprintf(w, "%s\t", units.HumanSize(float64(entry.Size))) - } else { - fmt.Fprintf(w, "%d\t", entry.Size) - } - - fmt.Fprintf(w, "%s", entry.Comment) - } - fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) } w.Flush() return nil diff --git a/vendor/github.com/docker/docker/api/client/images.go b/vendor/github.com/docker/docker/api/client/images.go index 92adeed0..ed001de7 100644 --- a/vendor/github.com/docker/docker/api/client/images.go +++ b/vendor/github.com/docker/docker/api/client/images.go @@ -22,7 +22,7 @@ import ( // // Usage: docker images [OPTIONS] [REPOSITORY] func (cli *DockerCli) CmdImages(args ...string) error { - cmd := Cli.Subcmd("images", []string{"[REPOSITORY]"}, "List images", true) + cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true) quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") diff --git a/vendor/github.com/docker/docker/api/client/import.go b/vendor/github.com/docker/docker/api/client/import.go index ec3d028f..c64e88d3 100644 --- a/vendor/github.com/docker/docker/api/client/import.go +++ b/vendor/github.com/docker/docker/api/client/import.go @@ -20,9 +20,10 @@ import ( // // Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] func (cli *DockerCli) CmdImport(args ...string) error { - cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then\noptionally tag it.", true) + cmd := Cli.Subcmd("import", 
[]string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true) flChanges := opts.NewListOpts(nil) cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) @@ -35,6 +36,7 @@ func (cli *DockerCli) CmdImport(args ...string) error { v.Set("fromSrc", src) v.Set("repo", repository) + v.Set("message", *message) for _, change := range flChanges.GetAll() { v.Add("changes", change) } diff --git a/vendor/github.com/docker/docker/api/client/info.go b/vendor/github.com/docker/docker/api/client/info.go index c7b19ccb..ebed2452 100644 --- a/vendor/github.com/docker/docker/api/client/info.go +++ b/vendor/github.com/docker/docker/api/client/info.go @@ -16,7 +16,7 @@ import ( // // Usage: docker info func (cli *DockerCli) CmdInfo(args ...string) error { - cmd := Cli.Subcmd("info", nil, "Display system-wide information", true) + cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, true) @@ -35,6 +35,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) if info.DriverStatus != nil { for _, pair := range info.DriverStatus { @@ -52,17 +53,17 @@ func (cli *DockerCli) CmdInfo(args ...string) error { if info.Debug { fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) - fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd) - fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines) - fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime) - fmt.Fprintf(cli.out, "EventsListeners: %d\n", info.NEventsListener) - fmt.Fprintf(cli.out, "Init SHA1: %s\n", info.InitSha1) - fmt.Fprintf(cli.out, "Init Path: %s\n", info.InitPath) - fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) + fmt.Fprintf(cli.out, " Init SHA1: %s\n", info.InitSha1) + fmt.Fprintf(cli.out, " Init Path: %s\n", info.InitPath) + fmt.Fprintf(cli.out, " Docker Root Dir: %s\n", info.DockerRootDir) } - ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HttpProxy) - ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HttpsProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) if info.IndexServerAddress != "" { @@ -72,7 +73,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) } } - // Only output these warnings if the server supports these features + + // Only output these warnings if the server does not support these features if h, err := httputils.ParseServerHeader(serverResp.header.Get("Server")); err == nil { if h.OS != "windows" { if !info.MemoryLimit { @@ -82,12 +84,12 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") 
} if !info.IPv4Forwarding { - fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") + fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled\n") } if !info.BridgeNfIptables { fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-iptables is disabled\n") } - if !info.BridgeNfIp6tables { + if !info.BridgeNfIP6tables { fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled\n") } } @@ -100,9 +102,13 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } } - if info.ExperimentalBuild { - fmt.Fprintf(cli.out, "Experimental: true\n") + ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore) } + if info.ClusterAdvertise != "" { + fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise) + } return nil } diff --git a/vendor/github.com/docker/docker/api/client/inspect.go b/vendor/github.com/docker/docker/api/client/inspect.go index 6e728bdf..37a3bd40 100644 --- a/vendor/github.com/docker/docker/api/client/inspect.go +++ b/vendor/github.com/docker/docker/api/client/inspect.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "net/url" "strings" "text/template" @@ -24,9 +25,10 @@ var funcMap = template.FuncMap{ // // Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, "Return low-level information on a container or image", true) + cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true) tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") + size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) @@ -51,18 +53,27 @@ func (cli *DockerCli) CmdInspect(args ...string) error { status := 0 isImage := false - for _, name := range cmd.Args() { + v := url.Values{} + if *size { + v.Set("size", "1") + } + for _, name := range cmd.Args() { if *inspectType == "" || *inspectType == "container" { - obj, _, err = readBody(cli.call("GET", "/containers/"+name+"/json", nil, nil)) - if err != nil && *inspectType == "container" { - if strings.Contains(err.Error(), "No such") { - fmt.Fprintf(cli.err, "Error: No such container: %s\n", name) - } else { - fmt.Fprintf(cli.err, "%s", err) + obj, _, err = readBody(cli.call("GET", "/containers/"+name+"/json?"+v.Encode(), nil, nil)) + if err != nil { + if err == errConnectionFailed { + return err + } + if *inspectType == "container" { + if strings.Contains(err.Error(), "No such") { + fmt.Fprintf(cli.err, "Error: No such container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue } - status = 1 - continue } } @@ -70,6 +81,9 @@ func (cli *DockerCli) CmdInspect(args ...string) error { obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil)) isImage = true if err != nil { + if err == errConnectionFailed { + return err + } if strings.Contains(err.Error(), "No such") { if *inspectType == "" { fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) @@ -82,7 +96,6 @@ func (cli *DockerCli) CmdInspect(args ...string) error { status = 1 continue } - } if tmpl == nil { @@ -94,42 +107,45 @@ 
func (cli *DockerCli) CmdInspect(args ...string) error {
 		} else {
 			rdr := bytes.NewReader(obj)
 			dec := json.NewDecoder(rdr)
+			buf := bytes.NewBufferString("")
 
 			if isImage {
 				inspPtr := types.ImageInspect{}
 				if err := dec.Decode(&inspPtr); err != nil {
-					fmt.Fprintf(cli.err, "%s\n", err)
+					fmt.Fprintf(cli.err, "Unable to read inspect data: %v\n", err)
 					status = 1
-					continue
+					break
 				}
-				if err := tmpl.Execute(cli.out, inspPtr); err != nil {
+				if err := tmpl.Execute(buf, inspPtr); err != nil {
 					rdr.Seek(0, 0)
-					var raw interface{}
-					if err := dec.Decode(&raw); err != nil {
-						return err
-					}
-					if err = tmpl.Execute(cli.out, raw); err != nil {
-						return err
+					var ok bool
+
+					if buf, ok = cli.decodeRawInspect(tmpl, dec); !ok {
+						fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
+						status = 1
+						break
 					}
 				}
 			} else {
 				inspPtr := types.ContainerJSON{}
 				if err := dec.Decode(&inspPtr); err != nil {
-					fmt.Fprintf(cli.err, "%s\n", err)
+					fmt.Fprintf(cli.err, "Unable to read inspect data: %v\n", err)
 					status = 1
-					continue
+					break
 				}
-				if err := tmpl.Execute(cli.out, inspPtr); err != nil {
+				if err := tmpl.Execute(buf, inspPtr); err != nil {
 					rdr.Seek(0, 0)
-					var raw interface{}
-					if err := dec.Decode(&raw); err != nil {
-						return err
-					}
-					if err = tmpl.Execute(cli.out, raw); err != nil {
-						return err
+					var ok bool
+
+					if buf, ok = cli.decodeRawInspect(tmpl, dec); !ok {
+						fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
+						status = 1
+						break
 					}
 				}
 			}
+
+			cli.out.Write(buf.Bytes())
 			cli.out.Write([]byte{'\n'})
 		}
 		indented.WriteString(",")
@@ -155,3 +171,33 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 	}
 	return nil
 }
+
+// decodeRawInspect executes the inspect template with a raw interface.
+// This allows docker cli to parse inspect structs injected with Swarm fields.
+// Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface.
+// It doesn't allow to modify this behavior either, sending <no value> messages to the output.
+// We assume that the template is invalid when there is a <no value>, if the template was valid
+// we'd get <nil> or "" values. In that case we fail with the original error raised executing the
+// template with the typed input.
+//
+// TODO: Go 1.5 allows to customize the error behavior, we can probably get rid of this as soon as
+// we build Docker with that version:
+// https://golang.org/pkg/text/template/#Template.Option
+func (cli *DockerCli) decodeRawInspect(tmpl *template.Template, dec *json.Decoder) (*bytes.Buffer, bool) {
+	var raw interface{}
+	buf := bytes.NewBufferString("")
+
+	if rawErr := dec.Decode(&raw); rawErr != nil {
+		fmt.Fprintf(cli.err, "Unable to read inspect data: %v\n", rawErr)
+		return buf, false
+	}
+
+	if rawErr := tmpl.Execute(buf, raw); rawErr != nil {
+		return buf, false
+	}
+
+	if strings.Contains(buf.String(), "<no value>") {
+		return buf, false
+	}
+	return buf, true
+}
diff --git a/vendor/github.com/docker/docker/api/client/kill.go b/vendor/github.com/docker/docker/api/client/kill.go
index 63abed31..c4c355c7 100644
--- a/vendor/github.com/docker/docker/api/client/kill.go
+++ b/vendor/github.com/docker/docker/api/client/kill.go
@@ -11,7 +11,7 @@ import (
 //
 // Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
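The TODO in decodeRawInspect above points at the Go 1.5 escape hatch. A one-line sketch, assuming a Go >= 1.5 build (`format` stands in for the user's `--format` string): with strict missing-key handling, `Execute` fails instead of printing `<no value>`, so the string sniffing becomes unnecessary.

```go
// "missingkey=error" makes template execution return an error for any
// field absent from the input, which is exactly the condition the
// go 1.4 code has to detect by scanning the output for "<no value>".
tmpl, err := template.New("inspect").Option("missingkey=error").Parse(format)
if err != nil {
	return err
}
```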
func (cli *DockerCli) CmdKill(args ...string) error { - cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, "Kill a running container using SIGKILL or a specified signal", true) + cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true) signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") cmd.Require(flag.Min, 1) diff --git a/vendor/github.com/docker/docker/api/client/load.go b/vendor/github.com/docker/docker/api/client/load.go index 9501db4f..378170e1 100644 --- a/vendor/github.com/docker/docker/api/client/load.go +++ b/vendor/github.com/docker/docker/api/client/load.go @@ -14,7 +14,7 @@ import ( // // Usage: docker load [OPTIONS] func (cli *DockerCli) CmdLoad(args ...string) error { - cmd := Cli.Subcmd("load", nil, "Load an image from a tar archive or STDIN", true) + cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true) infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") cmd.Require(flag.Exact, 0) diff --git a/vendor/github.com/docker/docker/api/client/login.go b/vendor/github.com/docker/docker/api/client/login.go index 68ec5c6d..e941a14e 100644 --- a/vendor/github.com/docker/docker/api/client/login.go +++ b/vendor/github.com/docker/docker/api/client/login.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "runtime" "strings" "github.com/docker/docker/api/types" @@ -22,7 +23,7 @@ import ( // // Usage: docker login SERVER func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := Cli.Subcmd("login", []string{"[SERVER]"}, "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServer+"\" is the default.", true) + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) cmd.Require(flag.Max, 1) var username, password, email string @@ -33,6 +34,11 @@ func (cli *DockerCli) CmdLogin(args ...string) error { cmd.ParseFlags(args, true) + // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.in = os.Stdin + } + serverAddress := registry.IndexServer if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) @@ -64,7 +70,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error { if username == "" { promptDefault("Username", authconfig.Username) username = readInput(cli.in, cli.out) - username = strings.Trim(username, " ") + username = strings.TrimSpace(username) if username == "" { username = authconfig.Username } diff --git a/vendor/github.com/docker/docker/api/client/logout.go b/vendor/github.com/docker/docker/api/client/logout.go index e81299b1..3753cbbe 100644 --- a/vendor/github.com/docker/docker/api/client/logout.go +++ b/vendor/github.com/docker/docker/api/client/logout.go @@ -14,7 +14,7 @@ import ( // // Usage: docker logout [SERVER] func (cli *DockerCli) CmdLogout(args ...string) error { - cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServer+"\" is the default.", true) + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) cmd.Require(flag.Max, 1) cmd.ParseFlags(args, true) @@ -26,13 +26,14 @@ func (cli *DockerCli) CmdLogout(args ...string) error { if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) - } else { - fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) - delete(cli.configFile.AuthConfigs, serverAddress) - - if err := cli.configFile.Save(); err != nil { - return fmt.Errorf("Failed to save docker config: %v", err) - } + return nil } + + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + delete(cli.configFile.AuthConfigs, serverAddress) + if err := cli.configFile.Save(); err != nil { + return fmt.Errorf("Failed to save docker config: %v", err) + } + return nil } diff --git a/vendor/github.com/docker/docker/api/client/logs.go b/vendor/github.com/docker/docker/api/client/logs.go index f1d647f3..6de7b133 100644 --- a/vendor/github.com/docker/docker/api/client/logs.go +++ b/vendor/github.com/docker/docker/api/client/logs.go @@ -12,11 +12,16 @@ import ( "github.com/docker/docker/pkg/timeutils" ) +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + // CmdLogs fetches the logs of a given container. 
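// Only containers using one of the log drivers in validDrivers above
// (json-file or journald) support reading their logs back with this command.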
// // docker logs [OPTIONS] CONTAINER func (cli *DockerCli) CmdLogs(args ...string) error { - cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, "Fetch the logs of a container", true) + cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true) follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") @@ -37,8 +42,8 @@ func (cli *DockerCli) CmdLogs(args ...string) error { return err } - if logType := c.HostConfig.LogConfig.Type; logType != "json-file" { - return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver (got: %s)", logType) + if !validDrivers[c.HostConfig.LogConfig.Type] { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) } v := url.Values{} diff --git a/vendor/github.com/docker/docker/api/client/network.go b/vendor/github.com/docker/docker/api/client/network.go index a1de2699..62ee46e1 100644 --- a/vendor/github.com/docker/docker/api/client/network.go +++ b/vendor/github.com/docker/docker/api/client/network.go @@ -1,16 +1,381 @@ -// +build experimental - package client import ( - "os" + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "strings" + "text/tabwriter" - nwclient "github.com/docker/libnetwork/client" + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" ) -// CmdNetwork is used to create, display and configure network endpoints. +// CmdNetwork is the parent subcommand for all network commands +// +// Usage: docker network [OPTIONS] func (cli *DockerCli) CmdNetwork(args ...string) error { - nCli := nwclient.NewNetworkCli(cli.out, cli.err, nwclient.CallFunc(cli.callWrapper)) - args = append([]string{"network"}, args...) - return nCli.Cmd(os.Args[0], args...) 
+ cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, networkUsage(), false) + cmd.Require(flag.Min, 1) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdNetworkCreate creates a new network with a given name +// +// Usage: docker network create [OPTIONS] +func (cli *DockerCli) CmdNetworkCreate(args ...string) error { + cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false) + flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network") + flOpts := opts.NewMapOpts(nil, nil) + + flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver") + flIpamSubnet := opts.NewListOpts(nil) + flIpamIPRange := opts.NewListOpts(nil) + flIpamGateway := opts.NewListOpts(nil) + flIpamAux := opts.NewMapOpts(nil, nil) + + cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment") + cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range") + cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet") + cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver") + cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options") + + cmd.Require(flag.Exact, 1) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + // Set the default driver to "" if the user didn't set the value. + // That way we can know whether it was user input or not. + driver := *flDriver + if !cmd.IsSet("-driver") && !cmd.IsSet("d") { + driver = "" + } + + ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Name: cmd.Arg(0), + Driver: driver, + IPAM: network.IPAM{Driver: *flIpamDriver, Config: ipamCfg}, + Options: flOpts.GetAll(), + CheckDuplicate: true, + } + obj, _, err := readBody(cli.call("POST", "/networks/create", nc, nil)) + if err != nil { + return err + } + var resp types.NetworkCreateResponse + err = json.Unmarshal(obj, &resp) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", resp.ID) + return nil +} + +// CmdNetworkRm deletes a network +// +// Usage: docker network rm +func (cli *DockerCli) CmdNetworkRm(args ...string) error { + cmd := Cli.Subcmd("network rm", []string{"NETWORK"}, "Deletes a network", false) + cmd.Require(flag.Exact, 1) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + _, _, err = readBody(cli.call("DELETE", "/networks/"+cmd.Arg(0), nil, nil)) + if err != nil { + return err + } + return nil +} + +// CmdNetworkConnect connects a container to a network +// +// Usage: docker network connect +func (cli *DockerCli) CmdNetworkConnect(args ...string) error { + cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false) + cmd.Require(flag.Exact, 2) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + nc := types.NetworkConnect{Container: cmd.Arg(1)} + _, _, err = readBody(cli.call("POST", "/networks/"+cmd.Arg(0)+"/connect", nc, nil)) + return err +} + +// CmdNetworkDisconnect disconnects a container from a network +// +// Usage: docker network disconnect +func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error { + cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, 
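+
+// For example (an illustrative invocation, with hypothetical names):
+// "docker network connect mynet web" issues POST /networks/mynet/connect
+// with the JSON body {"Container":"web"}.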
"Disconnects container from a network", false) + cmd.Require(flag.Exact, 2) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + nc := types.NetworkConnect{Container: cmd.Arg(1)} + _, _, err = readBody(cli.call("POST", "/networks/"+cmd.Arg(0)+"/disconnect", nc, nil)) + return err +} + +// CmdNetworkLs lists all the netorks managed by docker daemon +// +// Usage: docker network ls [OPTIONS] +func (cli *DockerCli) CmdNetworkLs(args ...string) error { + cmd := Cli.Subcmd("network ls", nil, "Lists networks", true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output") + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + + if err != nil { + return err + } + obj, _, err := readBody(cli.call("GET", "/networks", nil, nil)) + if err != nil { + return err + } + + var networkResources []types.NetworkResource + err = json.Unmarshal(obj, &networkResources) + if err != nil { + return err + } + + wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + + // unless quiet (-q) is specified, print field titles + if !*quiet { + fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER") + } + + for _, networkResource := range networkResources { + ID := networkResource.ID + netName := networkResource.Name + if !*noTrunc { + ID = stringid.TruncateID(ID) + } + if *quiet { + fmt.Fprintln(wr, ID) + continue + } + driver := networkResource.Driver + fmt.Fprintf(wr, "%s\t%s\t%s\t", + ID, + netName, + driver) + fmt.Fprint(wr, "\n") + } + wr.Flush() + return nil +} + +// CmdNetworkInspect inspects the network object for more details +// +// Usage: docker network inspect [OPTIONS] [NETWORK...] +func (cli *DockerCli) CmdNetworkInspect(args ...string) error { + cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on a network", false) + cmd.Require(flag.Min, 1) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + status := 0 + var networks []*types.NetworkResource + for _, name := range cmd.Args() { + obj, _, err := readBody(cli.call("GET", "/networks/"+name, nil, nil)) + if err != nil { + if strings.Contains(err.Error(), "not found") { + fmt.Fprintf(cli.err, "Error: No such network: %s\n", name) + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + networkResource := types.NetworkResource{} + if err := json.NewDecoder(bytes.NewReader(obj)).Decode(&networkResource); err != nil { + return err + } + + networks = append(networks, &networkResource) + } + + b, err := json.MarshalIndent(networks, "", " ") + if err != nil { + return err + } + + if _, err := io.Copy(cli.out, bytes.NewReader(b)); err != nil { + return err + } + io.WriteString(cli.out, "\n") + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to corelate the various related parameters and consolidate them. +// consoidateIpam consolidates subnets, ip-ranges, gateways and auxilary addresses into +// structured ipam data. 
+func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, fmt.Errorf("Invalid subnet %s : %v", s, err) + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, fmt.Errorf("Invalid cidr %s : %v", data, err) + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} + +func networkUsage() string { + networkCommands := map[string]string{ + "create": "Create a network", + "connect": "Connect container to a network", + "disconnect": "Disconnect container from a network", + "inspect": "Display detailed network information", + "ls": "List all networks", + "rm": "Remove a network", + } + + help := "Commands:\n" + + for cmd, description := range networkCommands { + help += fmt.Sprintf(" %-25.25s%s\n", cmd, description) + } + + help += fmt.Sprintf("\nRun 'docker network COMMAND --help' for more information on a command.") + return help } diff --git a/vendor/github.com/docker/docker/api/client/pause.go b/vendor/github.com/docker/docker/api/client/pause.go index 94dd59d7..e144a0bd 100644 --- a/vendor/github.com/docker/docker/api/client/pause.go +++ b/vendor/github.com/docker/docker/api/client/pause.go @@ -11,7 
+11,7 @@ import ( // // Usage: docker pause CONTAINER [CONTAINER...] func (cli *DockerCli) CmdPause(args ...string) error { - cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, "Pause all processes within a container", true) + cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) diff --git a/vendor/github.com/docker/docker/api/client/port.go b/vendor/github.com/docker/docker/api/client/port.go index d8bcbf6e..a981bc5e 100644 --- a/vendor/github.com/docker/docker/api/client/port.go +++ b/vendor/github.com/docker/docker/api/client/port.go @@ -15,7 +15,7 @@ import ( // // Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] func (cli *DockerCli) CmdPort(args ...string) error { - cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true) + cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) diff --git a/vendor/github.com/docker/docker/api/client/ps.go b/vendor/github.com/docker/docker/api/client/ps.go index e7fb97c3..50d76fbc 100644 --- a/vendor/github.com/docker/docker/api/client/ps.go +++ b/vendor/github.com/docker/docker/api/client/ps.go @@ -23,7 +23,7 @@ func (cli *DockerCli) CmdPs(args ...string) error { psFilterArgs = filters.Args{} v = url.Values{} - cmd = Cli.Subcmd("ps", nil, "List containers", true) + cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true) quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") @@ -95,7 +95,7 @@ func (cli *DockerCli) CmdPs(args ...string) error { f := *format if len(f) == 0 { - if len(cli.PsFormat()) > 0 { + if len(cli.PsFormat()) > 0 && !*quiet { f = cli.PsFormat() } else { f = "table" diff --git a/vendor/github.com/docker/docker/api/client/ps/custom.go b/vendor/github.com/docker/docker/api/client/ps/custom.go index d9e8fe07..1739fd12 100644 --- a/vendor/github.com/docker/docker/api/client/ps/custom.go +++ b/vendor/github.com/docker/docker/api/client/ps/custom.go @@ -1,12 +1,9 @@ package ps import ( - "bytes" "fmt" "strconv" "strings" - "text/tabwriter" - "text/template" "time" "github.com/docker/docker/api" @@ -64,6 +61,11 @@ func (c *containerContext) Image() string { if c.c.Image == "" { return "" } + if c.trunc { + if stringid.TruncateID(c.c.ImageID) == stringid.TruncateID(c.c.Image) { + return stringutils.Truncate(c.c.Image, 12) + } + } return c.c.Image } @@ -149,65 +151,6 @@ func (c *containerContext) addHeader(header string) { c.header = append(c.header, strings.ToUpper(header)) } -func customFormat(ctx Context, containers []types.Container) { - var ( - table bool - header string - format = ctx.Format - buffer = bytes.NewBufferString("") - ) - - if strings.HasPrefix(ctx.Format, tableKey) { - table = true - format = format[len(tableKey):] - } - - format = strings.Trim(format, " ") - r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") - format = r.Replace(format) - - if table && ctx.Size { - format += "\t{{.Size}}" - } - - tmpl, err := template.New("ps template").Parse(format) - if err != nil { - buffer.WriteString(fmt.Sprintf("Invalid `docker ps` format: %v\n", err)) - } - - 
for _, container := range containers { - containerCtx := &containerContext{ - trunc: ctx.Trunc, - c: container, - } - if err := tmpl.Execute(buffer, containerCtx); err != nil { - buffer = bytes.NewBufferString(fmt.Sprintf("Invalid `docker ps` format: %v\n", err)) - break - } - if table && len(header) == 0 { - header = containerCtx.fullHeader() - } - buffer.WriteString("\n") - } - - if table { - if len(header) == 0 { - // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template - containerCtx := &containerContext{} - tmpl.Execute(bytes.NewBufferString(""), containerCtx) - header = containerCtx.fullHeader() - } - - t := tabwriter.NewWriter(ctx.Output, 20, 1, 3, ' ', 0) - t.Write([]byte(header)) - t.Write([]byte("\n")) - buffer.WriteTo(t) - t.Flush() - } else { - buffer.WriteTo(ctx.Output) - } -} - func stripNamePrefix(ss []string) []string { for i, s := range ss { ss[i] = s[1:] diff --git a/vendor/github.com/docker/docker/api/client/ps/custom_test.go b/vendor/github.com/docker/docker/api/client/ps/custom_test.go index e6575375..c0b2eb2e 100644 --- a/vendor/github.com/docker/docker/api/client/ps/custom_test.go +++ b/vendor/github.com/docker/docker/api/client/ps/custom_test.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/pkg/stringid" ) -func TestContainerContextID(t *testing.T) { +func TestContainerPsContext(t *testing.T) { containerID := stringid.GenerateRandomID() unix := time.Now().Unix() @@ -23,16 +23,39 @@ func TestContainerContextID(t *testing.T) { call func() string }{ {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), idHeader, ctx.ID}, + {types.Container{ID: containerID}, false, containerID, idHeader, ctx.ID}, {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + true, + "a5a665ff33ec", + imageHeader, + ctx.Image, + }, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + false, + "a5a665ff33eced1e0803148700880edab4", + imageHeader, + ctx.Image, + }, {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, - {types.Container{Created: int(unix)}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, + {types.Container{}, true, "", labelsHeader, ctx.Labels}, {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, + {types.Container{Created: unix}, true, "Less 
than a second", runningForHeader, ctx.RunningFor}, } for _, c := range cases { @@ -67,8 +90,8 @@ func TestContainerContextID(t *testing.T) { } } - c := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} - ctx = containerContext{c: c, trunc: true} + c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c1, trunc: true} sid := ctx.Label("com.docker.swarm.swarm-id") node := ctx.Label("com.docker.swarm.node_name") @@ -85,4 +108,19 @@ func TestContainerContextID(t *testing.T) { t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) } + + c2 := types.Container{} + ctx = containerContext{c: c2, trunc: true} + + label := ctx.Label("anything.really") + if label != "" { + t.Fatalf("Expected an empty string, was %s", label) + } + + ctx = containerContext{c: c2, trunc: true} + fullHeader := ctx.fullHeader() + if fullHeader != "" { + t.Fatalf("Expected fullHeader to be empty, was %s", fullHeader) + } + } diff --git a/vendor/github.com/docker/docker/api/client/ps/formatter.go b/vendor/github.com/docker/docker/api/client/ps/formatter.go index 1a1323ac..2a45bfcf 100644 --- a/vendor/github.com/docker/docker/api/client/ps/formatter.go +++ b/vendor/github.com/docker/docker/api/client/ps/formatter.go @@ -1,7 +1,12 @@ package ps import ( + "bytes" + "fmt" "io" + "strings" + "text/tabwriter" + "text/template" "github.com/docker/docker/api/types" ) @@ -71,3 +76,65 @@ func tableFormat(ctx Context, containers []types.Container) { customFormat(ctx, containers) } + +func customFormat(ctx Context, containers []types.Container) { + var ( + table bool + header string + format = ctx.Format + buffer = bytes.NewBufferString("") + ) + + if strings.HasPrefix(ctx.Format, tableKey) { + table = true + format = format[len(tableKey):] + } + + format = strings.Trim(format, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + format = r.Replace(format) + + if table && ctx.Size { + format += "\t{{.Size}}" + } + + tmpl, err := template.New("").Parse(format) + if err != nil { + buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) + buffer.WriteTo(ctx.Output) + return + } + + for _, container := range containers { + containerCtx := &containerContext{ + trunc: ctx.Trunc, + c: container, + } + if err := tmpl.Execute(buffer, containerCtx); err != nil { + buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) + buffer.WriteTo(ctx.Output) + return + } + if table && len(header) == 0 { + header = containerCtx.fullHeader() + } + buffer.WriteString("\n") + } + + if table { + if len(header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + containerCtx := &containerContext{} + tmpl.Execute(bytes.NewBufferString(""), containerCtx) + header = containerCtx.fullHeader() + } + + t := tabwriter.NewWriter(ctx.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(header)) + t.Write([]byte("\n")) + buffer.WriteTo(t) + t.Flush() + } else { + buffer.WriteTo(ctx.Output) + } +} diff --git a/vendor/github.com/docker/docker/api/client/ps/formatter_test.go b/vendor/github.com/docker/docker/api/client/ps/formatter_test.go new file mode 100644 index 00000000..6e7304c3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/ps/formatter_test.go @@ -0,0 +1,208 @@ +package ps + +import ( + "bytes" + "testing" + + "github.com/docker/docker/api/types" +) + +func 
TestFormat(t *testing.T) {
+	contexts := []struct {
+		context  Context
+		expected string
+	}{
+		// Errors
+		{
+			Context{
+				Format: "{{InvalidFunction}}",
+			},
+			`Template parsing error: template: :1: function "InvalidFunction" not defined
+`,
+		},
+		{
+			Context{
+				Format: "{{nil}}",
+			},
+			`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
+`,
+		},
+		// Table Format
+		{
+			Context{
+				Format: "table",
+			},
+			`CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+containerID1 ubuntu "" 45 years ago foobar_baz
+containerID2 ubuntu "" 45 years ago foobar_bar
+`,
+		},
+		{
+			Context{
+				Format: "table {{.Image}}",
+			},
+			"IMAGE\nubuntu\nubuntu\n",
+		},
+		{
+			Context{
+				Format: "table {{.Image}}",
+				Size:   true,
+			},
+			"IMAGE SIZE\nubuntu 0 B\nubuntu 0 B\n",
+		},
+		{
+			Context{
+				Format: "table {{.Image}}",
+				Quiet:  true,
+			},
+			"IMAGE\nubuntu\nubuntu\n",
+		},
+		{
+			Context{
+				Format: "table",
+				Quiet:  true,
+			},
+			"containerID1\ncontainerID2\n",
+		},
+		// Raw Format
+		{
+			Context{
+				Format: "raw",
+			},
+			`container_id: containerID1
+image: ubuntu
+command: ""
+created_at: 1970-01-01 00:00:00 +0000 UTC
+status:
+names: foobar_baz
+labels:
+ports:
+
+container_id: containerID2
+image: ubuntu
+command: ""
+created_at: 1970-01-01 00:00:00 +0000 UTC
+status:
+names: foobar_bar
+labels:
+ports:
+
+`,
+		},
+		{
+			Context{
+				Format: "raw",
+				Size:   true,
+			},
+			`container_id: containerID1
+image: ubuntu
+command: ""
+created_at: 1970-01-01 00:00:00 +0000 UTC
+status:
+names: foobar_baz
+labels:
+ports:
+size: 0 B
+
+container_id: containerID2
+image: ubuntu
+command: ""
+created_at: 1970-01-01 00:00:00 +0000 UTC
+status:
+names: foobar_bar
+labels:
+ports:
+size: 0 B
+
+`,
+		},
+		{
+			Context{
+				Format: "raw",
+				Quiet:  true,
+			},
+			"container_id: containerID1\ncontainer_id: containerID2\n",
+		},
+		// Custom Format
+		{
+			Context{
+				Format: "{{.Image}}",
+			},
+			"ubuntu\nubuntu\n",
+		},
+		{
+			Context{
+				Format: "{{.Image}}",
+				Size:   true,
+			},
+			"ubuntu\nubuntu\n",
+		},
+	}
+
+	for _, context := range contexts {
+		containers := []types.Container{
+			{ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"},
+			{ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"},
+		}
+		out := bytes.NewBufferString("")
+		context.context.Output = out
+		Format(context.context, containers)
+		actual := out.String()
+		if actual != context.expected {
+			t.Fatalf("Expected \n%s, got \n%s", context.expected, actual)
+		}
+		// Clean buffer
+		out.Reset()
+	}
+}
+
+func TestCustomFormatNoContainers(t *testing.T) {
+	out := bytes.NewBufferString("")
+	containers := []types.Container{}
+
+	contexts := []struct {
+		context  Context
+		expected string
+	}{
+		{
+			Context{
+				Format: "{{.Image}}",
+				Output: out,
+			},
+			"",
+		},
+		{
+			Context{
+				Format: "table {{.Image}}",
+				Output: out,
+			},
+			"IMAGE\n",
+		},
+		{
+			Context{
+				Format: "{{.Image}}",
+				Output: out,
+				Size:   true,
+			},
+			"",
+		},
+		{
+			Context{
+				Format: "table {{.Image}}",
+				Output: out,
+				Size:   true,
+			},
+			"IMAGE SIZE\n",
+		},
+	}
+
+	for _, context := range contexts {
+		customFormat(context.context, containers)
+		actual := out.String()
+		if actual != context.expected {
+			t.Fatalf("Expected \n%s, got \n%s", context.expected, actual)
+		}
+		// Clean buffer
+		out.Reset()
+	}
+}
diff --git a/vendor/github.com/docker/docker/api/client/pull.go b/vendor/github.com/docker/docker/api/client/pull.go
index d6b85543..dcb2ecb0 100644
--- a/vendor/github.com/docker/docker/api/client/pull.go
+++
b/vendor/github.com/docker/docker/api/client/pull.go @@ -15,7 +15,7 @@ import ( // // Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] func (cli *DockerCli) CmdPull(args ...string) error { - cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, "Pull an image or a repository from a registry", true) + cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true) allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") addTrustedFlags(cmd, true) cmd.Require(flag.Exact, 1) @@ -25,7 +25,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { taglessRemote, tag := parsers.ParseRepositoryTag(remote) if tag == "" && !*allTags { - tag = tags.DEFAULTTAG + tag = tags.DefaultTag fmt.Fprintf(cli.out, "Using default tag: %s\n", tag) } else if tag != "" && *allTags { return fmt.Errorf("tag can't be used with --all-tags/-a") diff --git a/vendor/github.com/docker/docker/api/client/push.go b/vendor/github.com/docker/docker/api/client/push.go index 5d01511c..0c8823f0 100644 --- a/vendor/github.com/docker/docker/api/client/push.go +++ b/vendor/github.com/docker/docker/api/client/push.go @@ -14,7 +14,7 @@ import ( // // Usage: docker push NAME[:TAG] func (cli *DockerCli) CmdPush(args ...string) error { - cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, "Push an image or a repository to a registry", true) + cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true) addTrustedFlags(cmd, false) cmd.Require(flag.Exact, 1) diff --git a/vendor/github.com/docker/docker/api/client/rename.go b/vendor/github.com/docker/docker/api/client/rename.go index ae09a462..124ee1fc 100644 --- a/vendor/github.com/docker/docker/api/client/rename.go +++ b/vendor/github.com/docker/docker/api/client/rename.go @@ -2,6 +2,7 @@ package client import ( "fmt" + "strings" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" @@ -11,13 +12,17 @@ import ( // // Usage: docker rename OLD_NAME NEW_NAME func (cli *DockerCli) CmdRename(args ...string) error { - cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, "Rename a container", true) + cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true) cmd.Require(flag.Exact, 2) cmd.ParseFlags(args, true) - oldName := cmd.Arg(0) - newName := cmd.Arg(1) + oldName := strings.TrimSpace(cmd.Arg(0)) + newName := strings.TrimSpace(cmd.Arg(1)) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", oldName, newName), nil, nil)); err != nil { fmt.Fprintf(cli.err, "%s\n", err) diff --git a/vendor/github.com/docker/docker/api/client/restart.go b/vendor/github.com/docker/docker/api/client/restart.go index 88de4f75..92c61292 100644 --- a/vendor/github.com/docker/docker/api/client/restart.go +++ b/vendor/github.com/docker/docker/api/client/restart.go @@ -9,11 +9,11 @@ import ( flag "github.com/docker/docker/pkg/mflag" ) -// CmdRestart restarts one or more running containers. +// CmdRestart restarts one or more containers. // -// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] +// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] 
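+//
+// For example (an illustrative invocation, with a hypothetical container name):
+// "docker restart -t 30 web" gives the container 30 seconds to stop before it
+// is killed and restarted.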
func (cli *DockerCli) CmdRestart(args ...string) error { - cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, "Restart a running container", true) + cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true) nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") cmd.Require(flag.Min, 1) diff --git a/vendor/github.com/docker/docker/api/client/rm.go b/vendor/github.com/docker/docker/api/client/rm.go index 5766727a..9f49a2fd 100644 --- a/vendor/github.com/docker/docker/api/client/rm.go +++ b/vendor/github.com/docker/docker/api/client/rm.go @@ -13,7 +13,7 @@ import ( // // Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] func (cli *DockerCli) CmdRm(args ...string) error { - cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, "Remove one or more containers", true) + cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true) v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link") force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") diff --git a/vendor/github.com/docker/docker/api/client/rmi.go b/vendor/github.com/docker/docker/api/client/rmi.go index 25d5646e..b4e37006 100644 --- a/vendor/github.com/docker/docker/api/client/rmi.go +++ b/vendor/github.com/docker/docker/api/client/rmi.go @@ -14,7 +14,7 @@ import ( // // Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] func (cli *DockerCli) CmdRmi(args ...string) error { - cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, "Remove one or more images", true) + cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true) force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") cmd.Require(flag.Min, 1) diff --git a/vendor/github.com/docker/docker/api/client/run.go b/vendor/github.com/docker/docker/api/client/run.go index 50fccfa3..205aeebc 100644 --- a/vendor/github.com/docker/docker/api/client/run.go +++ b/vendor/github.com/docker/docker/api/client/run.go @@ -6,9 +6,11 @@ import ( "net/url" "os" "runtime" + "strings" "github.com/Sirupsen/logrus" Cli "github.com/docker/docker/cli" + derr "github.com/docker/docker/errors" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" @@ -36,11 +38,34 @@ func (cid *cidFile) Write(id string) error { return nil } +// if container start fails with 'command not found' error, return 127 +// if container start fails with 'command cannot be invoked' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.Trim(err.Error(), "Error response from daemon: ") + statusError := Cli.StatusError{} + derrCmdNotFound := derr.ErrorCodeCmdNotFound.Message() + derrCouldNotInvoke := derr.ErrorCodeCmdCouldNotBeInvoked.Message() + derrNoSuchImage := derr.ErrorCodeNoSuchImageHash.Message() + derrNoSuchImageTag := derr.ErrorCodeNoSuchImageTag.Message() + switch trimmedErr { + case derrCmdNotFound: + statusError = Cli.StatusError{StatusCode: 127} + case derrCouldNotInvoke: + statusError = Cli.StatusError{StatusCode: 126} + case derrNoSuchImage, derrNoSuchImageTag: + statusError = 
Cli.StatusError{StatusCode: 125} + default: + statusError = Cli.StatusError{StatusCode: 125} + } + return statusError +} + // CmdRun runs a command in a new container. // // Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] func (cli *DockerCli) CmdRun(args ...string) error { - cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, "Run a command in a new container", true) + cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true) addTrustedFlags(cmd, true) // These are flags not stored in Config/HostConfig @@ -60,7 +85,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { // just in case the Parse does not exit if err != nil { cmd.ReportError(err.Error(), true) - os.Exit(1) + os.Exit(125) } if len(hostConfig.DNS) > 0 { @@ -115,7 +140,8 @@ func (cli *DockerCli) CmdRun(args ...string) error { createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) if err != nil { - return err + cmd.ReportError(err.Error(), true) + return runStartContainerErr(err) } if sigProxy { sigc := cli.forwardAllSignals(createResponse.ID) @@ -199,8 +225,9 @@ func (cli *DockerCli) CmdRun(args ...string) error { }() //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil { - return err + if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil { + cmd.ReportError(err.Error(), false) + return runStartContainerErr(err) } if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { @@ -230,7 +257,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { // Autoremove: wait for the container to finish, retrieve // the exit code and remove the container if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil { - return err + return runStartContainerErr(err) } if _, status, err = getExitCode(cli, createResponse.ID); err != nil { return err diff --git a/vendor/github.com/docker/docker/api/client/save.go b/vendor/github.com/docker/docker/api/client/save.go index ee19d776..f14f9a45 100644 --- a/vendor/github.com/docker/docker/api/client/save.go +++ b/vendor/github.com/docker/docker/api/client/save.go @@ -15,8 +15,8 @@ import ( // // Usage: docker save [OPTIONS] IMAGE [IMAGE...] func (cli *DockerCli) CmdSave(args ...string) error { - cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, "Save an image(s) to a tar archive (streamed to STDOUT by default)", true) - outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") + cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) @@ -25,13 +25,14 @@ func (cli *DockerCli) CmdSave(args ...string) error { output = cli.out err error ) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } if *outfile != "" { - output, err = os.Create(*outfile) - if err != nil { + if output, err = os.Create(*outfile); err != nil { return err } - } else if cli.isTerminalOut { - return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") } sopts := &streamOpts{ @@ -39,19 +40,13 @@ func (cli *DockerCli) CmdSave(args ...string) error { out: output, } - if len(cmd.Args()) == 1 { - image := cmd.Arg(0) - if _, err := cli.stream("GET", "/images/"+image+"/get", sopts); err != nil { - return err - } - } else { - v := url.Values{} - for _, arg := range cmd.Args() { - v.Add("names", arg) - } - if _, err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil { - return err - } + v := url.Values{} + for _, arg := range cmd.Args() { + v.Add("names", arg) } + if _, err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil { + return err + } + return nil } diff --git a/vendor/github.com/docker/docker/api/client/search.go b/vendor/github.com/docker/docker/api/client/search.go index 2305d083..bf120cdd 100644 --- a/vendor/github.com/docker/docker/api/client/search.go +++ b/vendor/github.com/docker/docker/api/client/search.go @@ -26,7 +26,7 @@ func (r ByStars) Less(i, j int) bool { return r[i].StarCount < r[j].StarCount } // // Usage: docker search [OPTIONS] TERM func (cli *DockerCli) CmdSearch(args ...string) error { - cmd := Cli.Subcmd("search", []string{"TERM"}, "Search the Docker Hub for images", true) + cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds") automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") @@ -41,12 +41,13 @@ func (cli *DockerCli) CmdSearch(args ...string) error { // Resolve the Repository name from fqn to hostname + name taglessRemote, _ := parsers.ParseRepositoryTag(name) - repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) + + indexInfo, err := registry.ParseIndexInfo(taglessRemote) if err != nil { return err } - rdr, _, err := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, repoInfo.Index, "search") + rdr, _, err := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, indexInfo, "search") if err != nil { return err } @@ -63,7 +64,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error { w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") for _, res := range results { - if ((*automated || *trusted) && (!res.IsTrusted && !res.IsAutomated)) || (int(*stars) > res.StarCount) { + if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) || (*trusted && !res.IsTrusted) { continue } desc := strings.Replace(res.Description, "\n", " ", -1) diff --git a/vendor/github.com/docker/docker/api/client/service.go b/vendor/github.com/docker/docker/api/client/service.go deleted file mode 100644 index 9f0b1fcf..00000000 --- a/vendor/github.com/docker/docker/api/client/service.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build experimental - -package client - -import ( - "os" - - nwclient "github.com/docker/libnetwork/client" -) - -// CmdService is used to manage network services. -// service command is user to publish, attach and list a service from a container. -func (cli *DockerCli) CmdService(args ...string) error { - nCli := nwclient.NewNetworkCli(cli.out, cli.err, nwclient.CallFunc(cli.callWrapper)) - args = append([]string{"service"}, args...) - return nCli.Cmd(os.Args[0], args...) 
-}
diff --git a/vendor/github.com/docker/docker/api/client/start.go b/vendor/github.com/docker/docker/api/client/start.go
index e039df02..bff9c30e 100644
--- a/vendor/github.com/docker/docker/api/client/start.go
+++ b/vendor/github.com/docker/docker/api/client/start.go
@@ -32,6 +32,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
 		}
 		if sig == "" {
 			fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s)
+			continue
 		}
 		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil {
 			logrus.Debugf("Error sending signal: %s", err)
@@ -41,11 +42,11 @@
 	return sigc
 }
 
-// CmdStart starts one or more stopped containers.
+// CmdStart starts one or more containers.
 //
 // Usage: docker start [OPTIONS] CONTAINER [CONTAINER...]
 func (cli *DockerCli) CmdStart(args ...string) error {
-	cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, "Start one or more stopped containers", true)
+	cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true)
 	attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals")
 	openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
 	cmd.Require(flag.Min, 1)
diff --git a/vendor/github.com/docker/docker/api/client/stats.go b/vendor/github.com/docker/docker/api/client/stats.go
index 1feb1e11..151e602e 100644
--- a/vendor/github.com/docker/docker/api/client/stats.go
+++ b/vendor/github.com/docker/docker/api/client/stats.go
@@ -13,7 +13,7 @@ import (
 	"github.com/docker/docker/api/types"
 	Cli "github.com/docker/docker/cli"
-	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/units"
 )
@@ -25,10 +25,17 @@ type containerStats struct {
 	MemoryPercentage float64
 	NetworkRx        float64
 	NetworkTx        float64
+	BlockRead        float64
+	BlockWrite       float64
 	mu               sync.RWMutex
 	err              error
 }
 
+type stats struct {
+	mu sync.Mutex
+	cs []*containerStats
+}
+
 func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
 	v := url.Values{}
 	if streamStats {
@@ -54,25 +61,33 @@
 	)
 	go func() {
 		for {
-			var v *types.Stats
+			var v *types.StatsJSON
 			if err := dec.Decode(&v); err != nil {
 				u <- err
 				return
 			}
-			var (
+
+			var memPercent = 0.0
+			var cpuPercent = 0.0
+
+			// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
+			// got any data from cgroup
+			if v.MemoryStats.Limit != 0 {
 				memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
-				cpuPercent = 0.0
-			)
-			previousCPU = v.PreCpuStats.CpuUsage.TotalUsage
-			previousSystem = v.PreCpuStats.SystemUsage
+			}
+
+			previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
+			previousSystem = v.PreCPUStats.SystemUsage
 			cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
+			blkRead, blkWrite := calculateBlockIO(v.BlkioStats)
 			s.mu.Lock()
 			s.CPUPercentage = cpuPercent
 			s.Memory = float64(v.MemoryStats.Usage)
 			s.MemoryLimit = float64(v.MemoryStats.Limit)
 			s.MemoryPercentage = memPercent
-			s.NetworkRx = float64(v.Network.RxBytes)
-			s.NetworkTx = float64(v.Network.TxBytes)
+			s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
+			s.BlockRead = float64(blkRead)
+			s.BlockWrite = float64(blkWrite)
 			s.mu.Unlock()
 			u <- nil
 			if !streamStats {
@@ -89,6 +104,11 @@
s.CPUPercentage = 0 s.Memory = 0 s.MemoryPercentage = 0 + s.MemoryLimit = 0 + s.NetworkRx = 0 + s.NetworkTx = 0 + s.BlockRead = 0 + s.BlockWrite = 0 s.mu.Unlock() case err := <-u: if err != nil { @@ -110,12 +130,13 @@ func (s *containerStats) Display(w io.Writer) error { if s.err != nil { return s.err } - fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n", + fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\n", s.Name, s.CPUPercentage, units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), s.MemoryPercentage, - units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx)) + units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), + units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite)) return nil } @@ -123,18 +144,41 @@ func (s *containerStats) Display(w io.Writer) error { // // This shows real-time information on CPU usage, memory usage, and network I/O. // -// Usage: docker stats CONTAINER [CONTAINER...] +// Usage: docker stats [OPTIONS] [CONTAINER...] func (cli *DockerCli) CmdStats(args ...string) error { - cmd := Cli.Subcmd("stats", []string{"CONTAINER [CONTAINER...]"}, "Display a live stream of one or more containers' resource usage statistics", true) + cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true) + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") - cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) names := cmd.Args() + showAll := len(names) == 0 + + if showAll { + v := url.Values{} + if *all { + v.Set("all", "1") + } + body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, nil)) + if err != nil { + return err + } + var cs []types.Container + if err := json.Unmarshal(body, &cs); err != nil { + return err + } + for _, c := range cs { + names = append(names, c.ID[:12]) + } + } + if len(names) == 0 && !showAll { + return fmt.Errorf("No containers found") + } sort.Strings(names) + var ( - cStats []*containerStats + cStats = stats{} w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) ) printHeader := func() { @@ -142,61 +186,166 @@ func (cli *DockerCli) CmdStats(args ...string) error { fmt.Fprint(cli.out, "\033[2J") fmt.Fprint(cli.out, "\033[H") } - io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O\n") + io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\n") } for _, n := range names { s := &containerStats{Name: n} - cStats = append(cStats, s) + // no need to lock here since only the main goroutine is running here + cStats.cs = append(cStats.cs, s) go s.Collect(cli, !*noStream) } + closeChan := make(chan error) + if showAll { + type watch struct { + cid string + event string + err error + } + getNewContainers := func(c chan<- watch) { + res, err := cli.call("GET", "/events", nil, nil) + if err != nil { + c <- watch{err: err} + return + } + defer res.body.Close() + + dec := json.NewDecoder(res.body) + for { + var j *jsonmessage.JSONMessage + if err := dec.Decode(&j); err != nil { + c <- watch{err: err} + return + } + c <- watch{j.ID[:12], j.Status, nil} + } + } + go func(stopChan chan<- error) { + cChan := make(chan watch) + go getNewContainers(cChan) + for { + c := <-cChan + if c.err != nil { + stopChan <- c.err + return + } + switch c.event { + case "create": + s := &containerStats{Name: c.cid} + cStats.mu.Lock() + cStats.cs = append(cStats.cs, s) + cStats.mu.Unlock() + go 
s.Collect(cli, !*noStream)
+			case "stop":
+			case "die":
+				if !*all {
+					var remove int
+					// cStats cannot be O(1) with a map because ranging over it would cause
+					// containers in stats to move up and down in the list...:(
+					cStats.mu.Lock()
+					for i, s := range cStats.cs {
+						if s.Name == c.cid {
+							remove = i
+							break
+						}
+					}
+					cStats.cs = append(cStats.cs[:remove], cStats.cs[remove+1:]...)
+					cStats.mu.Unlock()
+				}
+			}
+		}
+	}(closeChan)
+	} else {
+		close(closeChan)
+	}
 	// do a quick pause so that any failed connections for containers that do not exist are able to be
 	// evicted before we display the initial or default values.
 	time.Sleep(1500 * time.Millisecond)
 	var errs []string
-	for _, c := range cStats {
+	cStats.mu.Lock()
+	for _, c := range cStats.cs {
 		c.mu.Lock()
 		if c.err != nil {
 			errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
 		}
 		c.mu.Unlock()
 	}
+	cStats.mu.Unlock()
 	if len(errs) > 0 {
 		return fmt.Errorf("%s", strings.Join(errs, ", "))
 	}
 	for range time.Tick(500 * time.Millisecond) {
 		printHeader()
 		toRemove := []int{}
-		for i, s := range cStats {
+		cStats.mu.Lock()
+		for i, s := range cStats.cs {
 			if err := s.Display(w); err != nil && !*noStream {
 				toRemove = append(toRemove, i)
 			}
 		}
 		for j := len(toRemove) - 1; j >= 0; j-- {
 			i := toRemove[j]
-			cStats = append(cStats[:i], cStats[i+1:]...)
+			cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...)
 		}
-		if len(cStats) == 0 {
+		if len(cStats.cs) == 0 && !showAll {
 			return nil
 		}
+		cStats.mu.Unlock()
 		w.Flush()
 		if *noStream {
 			break
 		}
+		select {
+		case err, ok := <-closeChan:
+			if ok {
+				if err != nil {
+					// this is suppressing "unexpected EOF" in the cli when the
+					// daemon restarts so it shuts down cleanly
+					if err == io.ErrUnexpectedEOF {
+						return nil
+					}
+					return err
+				}
+			}
+		default:
+			// just skip
+		}
 	}
 	return nil
 }
 
-func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.Stats) float64 {
+func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
 	var (
 		cpuPercent = 0.0
 		// calculate the change for the cpu usage of the container in between readings
-		cpuDelta = float64(v.CpuStats.CpuUsage.TotalUsage - previousCPU)
+		cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage - previousCPU)
 		// calculate the change for the entire system between readings
-		systemDelta = float64(v.CpuStats.SystemUsage - previousSystem)
+		systemDelta = float64(v.CPUStats.SystemUsage - previousSystem)
 	)
 
 	if systemDelta > 0.0 && cpuDelta > 0.0 {
-		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0
+		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
 	}
 	return cpuPercent
 }
+
+func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) {
+	for _, bioEntry := range blkio.IoServiceBytesRecursive {
+		switch strings.ToLower(bioEntry.Op) {
+		case "read":
+			blkRead = blkRead + bioEntry.Value
+		case "write":
+			blkWrite = blkWrite + bioEntry.Value
+		}
+	}
+	return
+}
+
+func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
+	var rx, tx float64
+
+	for _, v := range network {
+		rx += float64(v.RxBytes)
+		tx += float64(v.TxBytes)
+	}
+	return rx, tx
+}
diff --git a/vendor/github.com/docker/docker/api/client/stats_unit_test.go b/vendor/github.com/docker/docker/api/client/stats_unit_test.go
index 0831dbcb..301e0a67 100644
--- a/vendor/github.com/docker/docker/api/client/stats_unit_test.go
+++ b/vendor/github.com/docker/docker/api/client/stats_unit_test.go
@@ -4,6 +4,8 @@ import (
 	"bytes"
 	"sync"
 	"testing"
+
+
"github.com/docker/docker/api/types" ) func TestDisplay(t *testing.T) { @@ -15,6 +17,8 @@ func TestDisplay(t *testing.T) { MemoryPercentage: 100.0 / 2048.0 * 100.0, NetworkRx: 100 * 1024 * 1024, NetworkTx: 800 * 1024 * 1024, + BlockRead: 100 * 1024 * 1024, + BlockWrite: 800 * 1024 * 1024, mu: sync.RWMutex{}, } var b bytes.Buffer @@ -22,8 +26,21 @@ func TestDisplay(t *testing.T) { t.Fatalf("c.Display() gave error: %s", err) } got := b.String() - want := "app\t30.00%\t104.9 MB/2.147 GB\t4.88%\t104.9 MB/838.9 MB\n" + want := "app\t30.00%\t104.9 MB / 2.147 GB\t4.88%\t104.9 MB / 838.9 MB\t104.9 MB / 838.9 MB\n" if got != want { t.Fatalf("c.Display() = %q, want %q", got, want) } } + +func TestCalculBlockIO(t *testing.T) { + blkio := types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, + } + blkRead, blkWrite := calculateBlockIO(blkio) + if blkRead != 5801 { + t.Fatalf("blkRead = %d, want 5801", blkRead) + } + if blkWrite != 579 { + t.Fatalf("blkWrite = %d, want 579", blkWrite) + } +} diff --git a/vendor/github.com/docker/docker/api/client/stop.go b/vendor/github.com/docker/docker/api/client/stop.go index b7348a7b..91f5e65b 100644 --- a/vendor/github.com/docker/docker/api/client/stop.go +++ b/vendor/github.com/docker/docker/api/client/stop.go @@ -9,13 +9,13 @@ import ( flag "github.com/docker/docker/pkg/mflag" ) -// CmdStop stops one or more running containers. +// CmdStop stops one or more containers. // // A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). // // Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] func (cli *DockerCli) CmdStop(args ...string) error { - cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true) + cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true) nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") cmd.Require(flag.Min, 1) diff --git a/vendor/github.com/docker/docker/api/client/tag.go b/vendor/github.com/docker/docker/api/client/tag.go index 454c7ec5..b2fd0592 100644 --- a/vendor/github.com/docker/docker/api/client/tag.go +++ b/vendor/github.com/docker/docker/api/client/tag.go @@ -13,7 +13,7 @@ import ( // // Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] func (cli *DockerCli) CmdTag(args ...string) error { - cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, "Tag an image into a repository", true) + cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true) force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") cmd.Require(flag.Exact, 2) diff --git a/vendor/github.com/docker/docker/api/client/top.go b/vendor/github.com/docker/docker/api/client/top.go index c9934fe0..8327820a 100644 --- a/vendor/github.com/docker/docker/api/client/top.go +++ b/vendor/github.com/docker/docker/api/client/top.go @@ -16,7 +16,7 @@ import ( // // Usage: docker top CONTAINER func (cli *DockerCli) CmdTop(args ...string) error { - cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, "Display the running processes of a container", true) + cmd := Cli.Subcmd("top", 
[]string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) diff --git a/vendor/github.com/docker/docker/api/client/trust.go b/vendor/github.com/docker/docker/api/client/trust.go index b07cb79d..5936ac93 100644 --- a/vendor/github.com/docker/docker/api/client/trust.go +++ b/vendor/github.com/docker/docker/api/client/trust.go @@ -13,8 +13,8 @@ import ( "os" "path/filepath" "regexp" + "sort" "strconv" - "strings" "time" "github.com/Sirupsen/logrus" @@ -78,17 +78,19 @@ func (cli *DockerCli) certificateDirectory(server string) (string, error) { return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil } -func trustServer(index *registry.IndexInfo) string { +func trustServer(index *registry.IndexInfo) (string, error) { if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { - if !strings.HasPrefix(s, "https://") { - return "https://" + s + urlObj, err := url.Parse(s) + if err != nil || urlObj.Scheme != "https" { + return "", fmt.Errorf("valid https URL required for trust server, got %s", s) } - return s + + return s, nil } if index.Official { - return registry.NotaryServer + return registry.NotaryServer, nil } - return "https://" + index.Name + return "https://" + index.Name, nil } type simpleCredentialStore struct { @@ -100,9 +102,9 @@ func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { } func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig cliconfig.AuthConfig) (*client.NotaryRepository, error) { - server := trustServer(repoInfo.Index) - if !strings.HasPrefix(server, "https://") { - return nil, errors.New("unsupported scheme: https required for trust server") + server, err := trustServer(repoInfo.Index) + if err != nil { + return nil, err } var cfg = tlsconfig.ClientDefault @@ -143,15 +145,21 @@ func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, aut if err != nil { return nil, err } - resp, err := pingClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() challengeManager := auth.NewSimpleChallengeManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, err + + resp, err := pingClient.Do(req) + if err != nil { + // Ignore error on ping to operate in offline mode + logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) + } else { + defer resp.Body.Close() + + // Add response to the challenge manager to parse out + // authentication header and register authentication method + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } } creds := simpleCredentialStore{auth: authConfig} @@ -176,12 +184,33 @@ func convertTarget(t client.Target) (target, error) { } func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { - baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out) + aliasMap := map[string]string{ + "root": "root", + "snapshot": "repository", + "targets": "repository", + } + baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap) env := map[string]string{ "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), - "targets": os.Getenv("DOCKER_CONTENT_TRUST_TARGET_PASSPHRASE"), - "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_SNAPSHOT_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), } + + // Backwards compatibility with old env names. 
We should remove this in 1.10 + if env["root"] == "" { + if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"); passphrase != "" { + env["root"] = passphrase + fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE\n") + } + } + if env["snapshot"] == "" || env["targets"] == "" { + if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"); passphrase != "" { + env["snapshot"] = passphrase + env["targets"] = passphrase + fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE\n") + } + } + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { if v := env[alias]; v != "" { return v, numAttempts > 1, nil @@ -242,6 +271,8 @@ func notaryError(err error) error { return fmt.Errorf("remote repository out-of-date: %v", err) case trustmanager.ErrKeyNotFound: return fmt.Errorf("signing keys not found: %v", err) + case *net.OpError: + return fmt.Errorf("error contacting notary server: %v", err) } return err @@ -311,6 +342,22 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr return nil } +func selectKey(keys map[string]string) string { + if len(keys) == 0 { + return "" + } + + keyIDs := []string{} + for k := range keys { + keyIDs = append(keyIDs, k) + } + + // TODO(dmcgowan): let user choose if multiple keys, now pick consistently + sort.Strings(keyIDs) + + return keyIDs[0] +} + func targetStream(in io.Writer) (io.WriteCloser, <-chan []target) { r, w := io.Pipe() out := io.MultiWriter(in, w) @@ -409,16 +456,13 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, ks := repo.KeyStoreManager keys := ks.RootKeyStore().ListKeys() - var rootKey string - if len(keys) == 0 { + rootKey := selectKey(keys) + if rootKey == "" { rootKey, err = ks.GenRootKey("ecdsa") if err != nil { return err } - } else { - // TODO(dmcgowan): let user choose - rootKey = keys[0] } cryptoService, err := ks.GetRootCryptoService(rootKey) diff --git a/vendor/github.com/docker/docker/api/client/trust_test.go b/vendor/github.com/docker/docker/api/client/trust_test.go new file mode 100644 index 00000000..af198de2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/trust_test.go @@ -0,0 +1,55 @@ +package client + +import ( + "os" + "testing" + + "github.com/docker/docker/registry" +) + +func unsetENV() { + os.Unsetenv("DOCKER_CONTENT_TRUST") + os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") +} + +func TestENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istry.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + output, err := trustServer(indexInfo) + expectedStr := "https://notary-test.com:5000" + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestHTTPENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istry.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + _, err := trustServer(indexInfo) + if err == nil { + t.Fatal("Expected error with invalid scheme") + } +} + 
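The tests above pin down the new trustServer contract: an explicit DOCKER_CONTENT_TRUST_SERVER must be a valid https URL, official images fall back to the Notary server, and everything else gets https:// prefixed onto the index name. A self-contained sketch of that logic (the notary URL constant and the indexName/official parameters are stand-ins for registry.NotaryServer and the registry.IndexInfo fields the real code reads):

    package main

    import (
        "fmt"
        "net/url"
        "os"
    )

    // trustServerFor mirrors the validation the patch introduces.
    func trustServerFor(indexName string, official bool) (string, error) {
        if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" {
            u, err := url.Parse(s)
            if err != nil || u.Scheme != "https" {
                return "", fmt.Errorf("valid https URL required for trust server, got %s", s)
            }
            return s, nil
        }
        if official {
            return "https://notary.docker.io", nil // stand-in for registry.NotaryServer
        }
        return "https://" + indexName, nil
    }

    func main() {
        os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://insecure:4443")
        _, err := trustServerFor("docker.io", true)
        fmt.Println(err) // the plain-http override is now rejected instead of silently rewritten
    }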
+func TestOfficialTrustServer(t *testing.T) { + indexInfo := ®istry.IndexInfo{Name: "testserver", Official: true} + output, err := trustServer(indexInfo) + if err != nil || output != registry.NotaryServer { + t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) + } +} + +func TestNonOfficialTrustServer(t *testing.T) { + indexInfo := ®istry.IndexInfo{Name: "testserver", Official: false} + output, err := trustServer(indexInfo) + expectedStr := "https://" + indexInfo.Name + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} diff --git a/vendor/github.com/docker/docker/api/client/unpause.go b/vendor/github.com/docker/docker/api/client/unpause.go index cd1e6766..21e25857 100644 --- a/vendor/github.com/docker/docker/api/client/unpause.go +++ b/vendor/github.com/docker/docker/api/client/unpause.go @@ -11,7 +11,7 @@ import ( // // Usage: docker unpause CONTAINER [CONTAINER...] func (cli *DockerCli) CmdUnpause(args ...string) error { - cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, "Unpause all processes within a container", true) + cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) diff --git a/vendor/github.com/docker/docker/api/client/utils.go b/vendor/github.com/docker/docker/api/client/utils.go index 8f822155..7a8a7858 100644 --- a/vendor/github.com/docker/docker/api/client/utils.go +++ b/vendor/github.com/docker/docker/api/client/utils.go @@ -20,17 +20,18 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/types" - "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/cliconfig" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" + "github.com/docker/docker/utils" ) var ( - errConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + errConnectionFailed = errors.New("Cannot connect to the Docker daemon. 
Is the docker daemon running on this host?") ) type serverResponse struct { @@ -76,7 +77,7 @@ func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers m req.Header.Set(k, v) } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION+" ("+runtime.GOOS+")") + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")") req.URL.Host = cli.addr req.URL.Scheme = cli.scheme @@ -94,13 +95,14 @@ func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers m if resp != nil { serverResp.statusCode = resp.StatusCode } + if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return serverResp, errConnectionRefused + if utils.IsTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, errConnectionFailed } - if cli.tlsConfig == nil { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?\n* Is your docker daemon up and running?", err) + if cli.tlsConfig == nil && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) } if cli.tlsConfig != nil && strings.Contains(err.Error(), "remote error: bad certificate") { return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) @@ -125,48 +127,51 @@ func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers m return serverResp, nil } -func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) { - cmdAttempt := func(authConfig cliconfig.AuthConfig) (io.ReadCloser, int, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return nil, -1, err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - // begin the request - serverResp, err := cli.clientRequest(method, path, in, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - if err == nil && out != nil { - // If we are streaming output, complete the stream since - // errors may not appear until later. - err = cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), true, out, nil) - } - if err != nil { - // Since errors in a stream appear after status 200 has been written, - // we may need to change the status code. - if strings.Contains(err.Error(), "Authentication is required") || - strings.Contains(err.Error(), "Status 401") || - strings.Contains(err.Error(), "401 Unauthorized") || - strings.Contains(err.Error(), "status code 401") { - serverResp.statusCode = http.StatusUnauthorized - } - } - return serverResp.body, serverResp.statusCode, err +// cmdAttempt builds the corresponding registry Auth Header from the given +// authConfig. 
It returns the server's body, status, and error response +func (cli *DockerCli) cmdAttempt(authConfig cliconfig.AuthConfig, method, path string, in io.Reader, out io.Writer) (io.ReadCloser, int, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return nil, -1, err } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + // begin the request + serverResp, err := cli.clientRequest(method, path, in, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + if err == nil && out != nil { + // If we are streaming output, complete the stream since + // errors may not appear until later. + err = cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), true, out, nil) + } + if err != nil { + // Since errors in a stream appear after status 200 has been written, + // we may need to change the status code. + if strings.Contains(err.Error(), "Authentication is required") || + strings.Contains(err.Error(), "Status 401") || + strings.Contains(err.Error(), "401 Unauthorized") || + strings.Contains(err.Error(), "status code 401") { + serverResp.statusCode = http.StatusUnauthorized + } + } + return serverResp.body, serverResp.statusCode, err +} + +func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) { // Resolve the Auth config relevant for this server authConfig := registry.ResolveAuthConfig(cli.configFile, index) - body, statusCode, err := cmdAttempt(authConfig) + body, statusCode, err := cli.cmdAttempt(authConfig, method, path, in, out) if statusCode == http.StatusUnauthorized { fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil { return nil, -1, err } authConfig = registry.ResolveAuthConfig(cli.configFile, index) - return cmdAttempt(authConfig) + return cli.cmdAttempt(authConfig, method, path, in, out) } return body, statusCode, err } @@ -277,7 +282,7 @@ func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { serverResp, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil) if err != nil { // If we can't connect, then the daemon probably died. - if err != errConnectionRefused { + if err != errConnectionFailed { return false, -1, err } return false, -1, nil } @@ -299,7 +304,7 @@ func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { serverResp, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil) if err != nil { // If we can't connect, then the daemon probably died.
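cmdAttempt's credential handling boils down to JSON-marshalling the auth config and base64url-encoding it into the X-Registry-Auth header. A minimal sketch of that encoding (the map below stands in for the cliconfig.AuthConfig struct, and the credential values are invented):

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "fmt"
    )

    func main() {
        // Illustrative credentials only; the real code marshals cliconfig.AuthConfig.
        authConfig := map[string]string{
            "username":      "jdoe",
            "serveraddress": "https://index.docker.io/v1/",
        }
        buf, err := json.Marshal(authConfig)
        if err != nil {
            panic(err)
        }
        fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(buf))
    }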
- if err != errConnectionRefused { + if err != errConnectionFailed { return false, -1, err } return false, -1, nil diff --git a/vendor/github.com/docker/docker/api/client/version.go b/vendor/github.com/docker/docker/api/client/version.go index 2f1dba07..e14e0cab 100644 --- a/vendor/github.com/docker/docker/api/client/version.go +++ b/vendor/github.com/docker/docker/api/client/version.go @@ -7,15 +7,15 @@ import ( "github.com/docker/docker/api" "github.com/docker/docker/api/types" - "github.com/docker/docker/autogen/dockerversion" Cli "github.com/docker/docker/cli" + "github.com/docker/docker/dockerversion" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/utils" ) var versionTemplate = `Client: Version: {{.Client.Version}} - API version: {{.Client.ApiVersion}} + API version: {{.Client.APIVersion}} Go version: {{.Client.GoVersion}} Git commit: {{.Client.GitCommit}} Built: {{.Client.BuildTime}} @@ -24,7 +24,7 @@ var versionTemplate = `Client: Server: Version: {{.Server.Version}} - API version: {{.Server.ApiVersion}} + API version: {{.Server.APIVersion}} Go version: {{.Server.GoVersion}} Git commit: {{.Server.GitCommit}} Built: {{.Server.BuildTime}} @@ -43,7 +43,7 @@ type versionData struct { // // Usage: docker version func (cli *DockerCli) CmdVersion(args ...string) (err error) { - cmd := Cli.Subcmd("version", nil, "Show the Docker version information.", true) + cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true) tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") cmd.Require(flag.Exact, 0) @@ -60,11 +60,11 @@ func (cli *DockerCli) CmdVersion(args ...string) (err error) { vd := versionData{ Client: types.Version{ - Version: dockerversion.VERSION, - ApiVersion: api.Version, + Version: dockerversion.Version, + APIVersion: api.Version, GoVersion: runtime.Version(), - GitCommit: dockerversion.GITCOMMIT, - BuildTime: dockerversion.BUILDTIME, + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, Os: runtime.GOOS, Arch: runtime.GOARCH, Experimental: utils.ExperimentalBuild(), diff --git a/vendor/github.com/docker/docker/api/client/volume.go b/vendor/github.com/docker/docker/api/client/volume.go new file mode 100644 index 00000000..1dc0ea2d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/volume.go @@ -0,0 +1,234 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/url" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/api/types" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/parsers/filters" +) + +// CmdVolume is the parent subcommand for all volume commands +// +// Usage: docker volume +func (cli *DockerCli) CmdVolume(args ...string) error { + description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n" + commands := [][]string{ + {"create", "Create a volume"}, + {"inspect", "Return low-level information on a volume"}, + {"ls", "List volumes"}, + {"rm", "Remove a volume"}, + } + + for _, cmd := range commands { + description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1]) + } + + description += "\nRun 'docker volume COMMAND --help' for more information on a command" + cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false) + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdVolumeLs outputs a list of Docker volumes. 
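CmdVersion renders the versionTemplate above (or a user-supplied template via -f) with text/template against its versionData struct. A reduced, runnable sketch of that rendering, with the struct trimmed to two illustrative fields:

    package main

    import (
        "os"
        "text/template"
    )

    type clientInfo struct{ Version, APIVersion string } // trimmed stand-in for types.Version

    func main() {
        tmpl := template.Must(template.New("").Parse(
            "Client:\n Version:      {{.Client.Version}}\n API version:  {{.Client.APIVersion}}\n"))
        data := struct{ Client clientInfo }{Client: clientInfo{"1.9.0", "1.21"}}
        tmpl.Execute(os.Stdout, data)
    }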
+// +// Usage: docker volume ls [OPTIONS] +func (cli *DockerCli) CmdVolumeLs(args ...string) error { + cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) + + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volFilterArgs := filters.Args{} + for _, f := range flFilter.GetAll() { + var err error + volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) + if err != nil { + return err + } + } + + v := url.Values{} + if len(volFilterArgs) > 0 { + filterJSON, err := filters.ToParam(volFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJSON) + } + + resp, err := cli.call("GET", "/volumes?"+v.Encode(), nil, nil) + if err != nil { + return err + } + + var volumes types.VolumesListResponse + if err := json.NewDecoder(resp.body).Decode(&volumes); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintf(w, "DRIVER \tVOLUME NAME") + fmt.Fprintf(w, "\n") + } + + for _, vol := range volumes.Volumes { + if *quiet { + fmt.Fprintln(w, vol.Name) + continue + } + fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) + } + w.Flush() + return nil +} + +// CmdVolumeInspect displays low-level information on one or more volumes. +// +// Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] +func (cli *DockerCli) CmdVolumeInspect(args ...string) error { + cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + if err := cmd.Parse(args); err != nil { + return nil + } + + var tmpl *template.Template + if *tmplStr != "" { + var err error + tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr) + if err != nil { + return err + } + } + + var status = 0 + var volumes []*types.Volume + for _, name := range cmd.Args() { + resp, err := cli.call("GET", "/volumes/"+name, nil, nil) + if err != nil { + return err + } + + var volume types.Volume + if err := json.NewDecoder(resp.body).Decode(&volume); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + + if tmpl == nil { + volumes = append(volumes, &volume) + continue + } + + if err := tmpl.Execute(cli.out, &volume); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + io.WriteString(cli.out, "\n") + } + + if tmpl != nil { + return nil + } + + b, err := json.MarshalIndent(volumes, "", " ") + if err != nil { + return err + } + _, err = io.Copy(cli.out, bytes.NewReader(b)) + if err != nil { + return err + } + io.WriteString(cli.out, "\n") + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +// CmdVolumeCreate creates a new volume.
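CmdVolumeLs above turns each -f/--filter flag into the JSON "filters" query parameter. A sketch of the resulting request path (the JSON shape is assumed to match what pkg/parsers/filters.ToParam produces for dangling=true):

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // docker volume ls -f dangling=true serializes roughly to this.
        v := url.Values{}
        v.Set("filters", `{"dangling":["true"]}`)
        fmt.Println("GET /volumes?" + v.Encode())
    }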
+// +// Usage: docker volume create [OPTIONS] +func (cli *DockerCli) CmdVolumeCreate(args ...string) error { + cmd := Cli.Subcmd("volume create", nil, "Create a volume", true) + flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name") + flName := cmd.String([]string{"-name"}, "", "Specify volume name") + + flDriverOpts := opts.NewMapOpts(nil, nil) + cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volReq := &types.VolumeCreateRequest{ + Driver: *flDriver, + DriverOpts: flDriverOpts.GetAll(), + } + + if *flName != "" { + volReq.Name = *flName + } + + resp, err := cli.call("POST", "/volumes/create", volReq, nil) + if err != nil { + return err + } + + var vol types.Volume + if err := json.NewDecoder(resp.body).Decode(&vol); err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", vol.Name) + return nil +} + +// CmdVolumeRm removes one or more volumes. +// +// Usage: docker volume rm VOLUME [VOLUME...] +func (cli *DockerCli) CmdVolumeRm(args ...string) error { + cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true) + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + var status = 0 + for _, name := range cmd.Args() { + _, err := cli.call("DELETE", "/volumes/"+name, nil, nil) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(cli.out, "%s\n", name) + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/wait.go b/vendor/github.com/docker/docker/api/client/wait.go index 829a320c..3b03f706 100644 --- a/vendor/github.com/docker/docker/api/client/wait.go +++ b/vendor/github.com/docker/docker/api/client/wait.go @@ -13,7 +13,7 @@ import ( // // Usage: docker wait CONTAINER [CONTAINER...] func (cli *DockerCli) CmdWait(args ...string) error { - cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, "Block until a container stops, then print its exit code.", true) + cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true) cmd.Require(flag.Min, 1) cmd.ParseFlags(args, true) diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index d31c557e..3a845028 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -5,6 +5,7 @@ import ( "mime" "path/filepath" "sort" + "strconv" "strings" "github.com/Sirupsen/logrus" @@ -16,39 +17,53 @@ import ( // Common constants for daemon and client.
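CmdVolumeCreate above posts a JSON body to /volumes/create. A sketch of what goes over the wire for a typical invocation (the local struct mirrors only the types.VolumeCreateRequest fields the patch uses; the volume name and option are invented):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type volumeCreateRequest struct {
        Name       string            `json:"Name,omitempty"`
        Driver     string            `json:"Driver"`
        DriverOpts map[string]string `json:"DriverOpts,omitempty"`
    }

    func main() {
        // docker volume create -d local --name data -o size=10G
        req := volumeCreateRequest{Name: "data", Driver: "local", DriverOpts: map[string]string{"size": "10G"}}
        b, _ := json.Marshal(req)
        fmt.Println(string(b)) // body sent to POST /volumes/create
    }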
const ( - // Current REST API version - Version version.Version = "1.21" + // Version of the current REST API + Version version.Version = "1.22" - // Minimun REST API version supported + // MinVersion represents the Minimum REST API version supported MinVersion version.Version = "1.12" - // Default filename with Docker commands, read by docker build + // DefaultDockerfileName is the default filename with Docker commands, read by docker build DefaultDockerfileName string = "Dockerfile" ) -type ByPrivatePort []types.Port +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port -func (r ByPrivatePort) Len() int { return len(r) } -func (r ByPrivatePort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r ByPrivatePort) Less(i, j int) bool { return r[i].PrivatePort < r[j].PrivatePort } +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns a formatted string representing the open ports of a container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by the 'docker ps' command func DisplayablePorts(ports []types.Port) string { - var ( - result = []string{} - hostMappings = []string{} - firstInGroupMap map[string]int - lastInGroupMap map[string]int - ) - firstInGroupMap = make(map[string]int) - lastInGroupMap = make(map[string]int) - sort.Sort(ByPrivatePort(ports)) + type portGroup struct { + first int + last int + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) for _, port := range ports { - var ( - current = port.PrivatePort - portKey = port.Type - firstInGroup int - lastInGroup int - ) + current := port.PrivatePort + portKey := port.Type if port.IP != "" { if port.PublicPort != current { hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) @@ -56,45 +71,41 @@ func DisplayablePorts(ports []types.Port) string { } portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) } - firstInGroup = firstInGroupMap[portKey] - lastInGroup = lastInGroupMap[portKey] + group := groupMap[portKey] - if firstInGroup == 0 { - firstInGroupMap[portKey] = current - lastInGroupMap[portKey] = current + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current continue } - if current == (lastInGroup + 1) { - lastInGroupMap[portKey] = current - continue - } - result = append(result, FormGroup(portKey, firstInGroup, lastInGroup)) - firstInGroupMap[portKey] = current - lastInGroupMap[portKey] = current + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} } - for portKey, firstInGroup := range firstInGroupMap { - result = append(result, FormGroup(portKey, firstInGroup, lastInGroupMap[portKey])) + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) } result = append(result,
hostMappings...) return strings.Join(result, ", ") } -func FormGroup(key string, start, last int) string { - var ( - group string - parts = strings.Split(key, "/") - groupType = parts[0] - ip = "" - ) +func formGroup(key string, start, last int) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string if len(parts) > 1 { ip = parts[0] groupType = parts[1] } - if start == last { - group = fmt.Sprintf("%d", start) - } else { - group = fmt.Sprintf("%d-%d", start, last) + group := strconv.Itoa(start) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) } if ip != "" { group = fmt.Sprintf("%s:%s->%s", ip, group, group) @@ -102,6 +113,7 @@ func FormGroup(key string, start, last int) string { return fmt.Sprintf("%s/%s", group, groupType) } +// MatchesContentType validates the content type against the expected one func MatchesContentType(contentType, expectedType string) bool { mimetype, _, err := mime.ParseMediaType(contentType) if err != nil { diff --git a/vendor/github.com/docker/docker/api/common_test.go b/vendor/github.com/docker/docker/api/common_test.go new file mode 100644 index 00000000..aa30099d --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_test.go @@ -0,0 +1,340 @@ +package api + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + "os" +) + +type ports struct { + ports []types.Port + expected string +} + +// DisplayablePorts +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp, 9988/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + 
PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + if port.expected != actual { + t.Fatalf("Expected %s, got %s.", port.expected, actual) + } + } +} + +// MatchesContentType +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + if err != nil { + t.Fatal(err) + } + + if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { + t.Fatalf("expected an error, got nothing.") + } + +} + +func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + // Without the need to create the folder hierarchy + tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With the need to create the folder hierarchy as tmpKeyFie is in a path + // where some folder do not exists. 
+ tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With no path at all + defer os.Remove("keyfile") + if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat("keyfile"); err != nil { + t.Fatalf("Expected to find a file keyfile, got %v", err) + } +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("fixtures", "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a key file, got : %v and %v", err, key) + } +} diff --git a/vendor/github.com/docker/docker/api/fixtures/keyfile b/vendor/github.com/docker/docker/api/fixtures/keyfile new file mode 100644 index 00000000..322f2544 --- /dev/null +++ b/vendor/github.com/docker/docker/api/fixtures/keyfile @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY + +MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 +AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky +NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/api/server/form.go b/vendor/github.com/docker/docker/api/server/form.go deleted file mode 100644 index 6a8387a8..00000000 --- a/vendor/github.com/docker/docker/api/server/form.go +++ /dev/null @@ -1,56 +0,0 @@ -package server - -import ( - "fmt" - "net/http" - "strconv" - "strings" -) - -func boolValue(r *http.Request, k string) bool { - s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) - return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") -} - -// boolValueOrDefault returns the default bool passed if the query param is -// missing, otherwise it's just a proxy to boolValue above -func boolValueOrDefault(r *http.Request, k string, d bool) bool { - if _, ok := r.Form[k]; !ok { - return d - } - return boolValue(r, k) -} - -func int64ValueOrZero(r *http.Request, k string) int64 { - val, err := strconv.ParseInt(r.FormValue(k), 10, 64) - if err != nil { - return 0 - } - return val -} - -type archiveOptions struct { - name string - path string -} - -func archiveFormValues(r *http.Request, vars map[string]string) (archiveOptions, error) { - if vars == nil { - return archiveOptions{}, fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return archiveOptions{}, err - } - - name := vars["name"] - path := r.Form.Get("path") - - switch { - case name == "": - return archiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") - case path == "": - return archiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") - } - - return archiveOptions{name, path}, nil -} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form.go b/vendor/github.com/docker/docker/api/server/httputils/form.go new file mode 100644 index 00000000..20188c12 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. 
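The form helpers keep the semantics of the deleted server/form.go, just exported from the new httputils package: every trimmed, lowercased value except "", "0", "no", "false" and "none" reads as true, as the BoolValue implementation that follows shows. A quick usage sketch of that truth table (the local boolValue replicates it for a standalone program):

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
        "strings"
    )

    // boolValue replicates the helper's truth table for this sketch.
    func boolValue(r *http.Request, k string) bool {
        s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
        return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
    }

    func main() {
        r, _ := http.NewRequest("POST", "", nil)
        r.Form = url.Values{"all": {"1"}, "size": {"none"}}
        fmt.Println(boolValue(r, "all"), boolValue(r, "size")) // true false
    }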
+func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. +func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/vendor/github.com/docker/docker/api/server/form_test.go b/vendor/github.com/docker/docker/api/server/httputils/form_test.go similarity index 55% rename from vendor/github.com/docker/docker/api/server/form_test.go rename to vendor/github.com/docker/docker/api/server/httputils/form_test.go index 5b3bd718..c56f7c15 100644 --- a/vendor/github.com/docker/docker/api/server/form_test.go +++ b/vendor/github.com/docker/docker/api/server/httputils/form_test.go @@ -1,4 +1,4 @@ -package server +package httputils import ( "net/http" @@ -26,7 +26,7 @@ func TestBoolValue(t *testing.T) { r, _ := http.NewRequest("POST", "", nil) r.Form = v - a := boolValue(r, "test") + a := BoolValue(r, "test") if a != e { t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) } @@ -35,7 +35,7 @@ func TestBoolValue(t *testing.T) { func TestBoolValueOrDefault(t *testing.T) { r, _ := http.NewRequest("GET", "", nil) - if !boolValueOrDefault(r, "queryparam", true) { + if !BoolValueOrDefault(r, "queryparam", true) { t.Fatal("Expected to get true default value, got false") } @@ -43,7 +43,7 @@ func TestBoolValueOrDefault(t *testing.T) { v.Set("param", "") r, _ = http.NewRequest("GET", "", nil) r.Form = v - if boolValueOrDefault(r, "param", true) { + if BoolValueOrDefault(r, "param", true) { t.Fatal("Expected not to get true") } } @@ -62,9 +62,44 @@ func TestInt64ValueOrZero(t *testing.T) { r, _ := http.NewRequest("POST", "", nil) r.Form = v - a := int64ValueOrZero(r, "test") + a := Int64ValueOrZero(r, "test") if a != e { t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) } } } + +func TestInt64ValueOrDefault(t *testing.T) { + 
cases := map[string]int64{ + "": -1, + "-1": -1, + "42": 42, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a, err := Int64ValueOrDefault(r, "test", -1) + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + if err != nil { + t.Fatalf("Error should be nil, but received: %s", err) + } + } +} + +func TestInt64ValueOrDefaultWithError(t *testing.T) { + v := url.Values{} + v.Set("test", "invalid") + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + _, err := Int64ValueOrDefault(r, "test", -1) + if err == nil { + t.Fatalf("Expected an error.") + } +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/vendor/github.com/docker/docker/api/server/httputils/httputils.go new file mode 100644 index 00000000..2997dc03 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils.go @@ -0,0 +1,180 @@ +package httputils + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/utils" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). +type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list of http streams is properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// ParseMultipartForm ensures the request form is parsed, even with invalid content types.
+func ParseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// WriteError decodes a specific docker error and sends it in the response. +func WriteError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") + return + } + + statusCode := http.StatusInternalServerError + errMsg := err.Error() + + // Based on the type of error we get we need to process things + // slightly differently to extract the error message. + // In the 'errcode.*' cases there are two different types of + // error that could be returned. errcode.ErrorCode is the base + // type of error object - it is just an 'int' that can then be + // used as the look-up key to find the message. errcode.Error + // extends errcode.ErrorCode by adding error-instance specific + // data, like 'details' or variable strings to be inserted into + // the message. + // + // Ideally, we should just be able to call err.Error() for all + // cases but the errcode package doesn't support that yet. + // + // Additionally, in both errcode cases, there might be an http + // status code associated with it, and if so use it. + switch err.(type) { + case errcode.ErrorCode: + daError, _ := err.(errcode.ErrorCode) + statusCode = daError.Descriptor().HTTPStatusCode + errMsg = daError.Message() + + case errcode.Error: + // For reference, if you're looking for a particular error + // then you can do something like : + // import ( derr "github.com/docker/docker/errors" ) + // if daError.ErrorCode() == derr.ErrorCodeNoSuchContainer { ... } + + daError, _ := err.(errcode.Error) + statusCode = daError.ErrorCode().Descriptor().HTTPStatusCode + errMsg = daError.Message + + default: + // This part will be removed once we've + // converted everything over to use the errcode package + + // FIXME: this is brittle and should not be necessary. + // If we need to differentiate between different possible error types, + // we should create appropriate error types with clearly defined meaning + errStr := strings.ToLower(err.Error()) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + logrus.WithFields(logrus.Fields{"statusCode": statusCode, "err": utils.GetErrorMessage(err)}).Error("HTTP Error") + http.Error(w, errMsg, statusCode) +} + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return json.NewEncoder(w).Encode(v) +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type.
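VersionFromContext is the read side of a plain context.WithValue round-trip; versionMiddleware (further down) is the write side. A minimal sketch of the pattern, using a local string value and key constant in place of version.Version and httputils.APIVersionKey:

    package main

    import (
        "fmt"

        "golang.org/x/net/context"
    )

    const apiVersionKey = "api-version" // local stand-in for httputils.APIVersionKey

    func main() {
        ctx := context.WithValue(context.Background(), apiVersionKey, "1.22")
        if v, ok := ctx.Value(apiVersionKey).(string); ok {
            fmt.Println("client requested API version", v)
        }
    }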
+func VersionFromContext(ctx context.Context) (ver version.Version) { + if ctx == nil { + return + } + val := ctx.Value(APIVersionKey) + if val == nil { + return + } + return val.(version.Version) +} diff --git a/vendor/github.com/docker/docker/api/server/middleware.go b/vendor/github.com/docker/docker/api/server/middleware.go new file mode 100644 index 00000000..628d0a3b --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware.go @@ -0,0 +1,154 @@ +package server + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +// middleware is an adapter to allow the use of ordinary functions as Docker API filters. +// Any function that has the appropriate signature can be registered as a middleware. +type middleware func(handler httputils.APIFunc) httputils.APIFunc + +// loggingMiddleware logs each request when logging is enabled. +func (s *Server) loggingMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if s.cfg.Logging { + logrus.Infof("%s %s", r.Method, r.RequestURI) + } + return handler(ctx, w, r, vars) + } +} + +// debugRequestMiddleware dumps the request to logger +// This is implemented separately from `loggingMiddleware` so that we don't have to +// check the logging level or have httputil.DumpRequest called on each request. +// Instead the middleware is only injected when the logging level is set to debug +func (s *Server) debugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if s.cfg.Logging && r.Method == "POST" { + if err := httputils.CheckForJSON(r); err == nil { + var buf bytes.Buffer + if _, err := buf.ReadFrom(r.Body); err == nil { + r.Body.Close() + r.Body = ioutil.NopCloser(&buf) + var postForm map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &postForm); err == nil { + if _, exists := postForm["password"]; exists { + postForm["password"] = "*****" + } + logrus.Debugf("form data: %q", postForm) + } + } + } + } + return handler(ctx, w, r, vars) + } +} + +// userAgentMiddleware checks the User-Agent header looking for a valid docker client spec. +func (s *Server) userAgentMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + dockerVersion := version.Version(s.cfg.Version) + + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + + // v1.20 onwards includes the GOOS of the client after the version + // such as Docker/1.7.0 (linux) + if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { + userAgent[1] = strings.Split(userAgent[1], " ")[0] + } + + if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { + logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + return handler(ctx, w, r, vars) + } }
 
// corsMiddleware sets the CORS header expectations in the server.
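The middleware type defined above is the standard Go handler-wrapping pattern. A standalone sketch of how such adapters compose (string-returning handlers stand in for httputils.APIFunc; the loop matches the one in handleWithGlobalMiddlewares below, where the first middleware in the slice ends up innermost and so is evaluated last):

    package main

    import "fmt"

    type handler func() string

    type middleware func(handler) handler

    // tag wraps a handler so its output records the wrapping order.
    func tag(name string) middleware {
        return func(next handler) handler {
            return func() string { return name + "(" + next() + ")" }
        }
    }

    func main() {
        h := handler(func() string { return "endpoint" })
        for _, m := range []middleware{tag("version"), tag("cors"), tag("logging")} {
            h = m(h)
        }
        fmt.Println(h()) // logging(cors(version(endpoint)))
    }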
+func (s *Server) corsMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all header values will be passed to HTTP handler + corsHeaders := s.cfg.CorsHeaders + if corsHeaders == "" && s.cfg.EnableCors { + corsHeaders = "*" + } + + if corsHeaders != "" { + writeCorsHeaders(w, r, corsHeaders) + } + return handler(ctx, w, r, vars) + } +} + +// versionMiddleware checks the api version requirements before passing the request to the server handler. +func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := version.Version(vars["version"]) + if apiVersion == "" { + apiVersion = api.Version + } + + if apiVersion.GreaterThan(api.Version) { + return errors.ErrorCodeNewerClientVersion.WithArgs(apiVersion, api.Version) + } + if apiVersion.LessThan(api.MinVersion) { + return errors.ErrorCodeOldClientVersion.WithArgs(apiVersion, api.Version) + } + + w.Header().Set("Server", "Docker/"+dockerversion.Version+" ("+runtime.GOOS+")") + ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) + return handler(ctx, w, r, vars) + } +} + +// handleWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +// +// Example: handleWithGlobalMiddlewares(s.getContainersName) +// +// s.loggingMiddleware( +// s.userAgentMiddleware( +// s.corsMiddleware( +// versionMiddleware(s.getContainersName) +// ) +// ) +// ) +func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + middlewares := []middleware{ + versionMiddleware, + s.corsMiddleware, + s.userAgentMiddleware, + s.loggingMiddleware, + } + + // Only want this on debug level + // this is separate from the logging middleware so that we can do this check here once, + // rather than for each request.
+ if logrus.GetLevel() == logrus.DebugLevel { + middlewares = append(middlewares, s.debugRequestMiddleware) + } + + h := handler + for _, m := range middlewares { + h = m(h) + } + return h +} diff --git a/vendor/github.com/docker/docker/api/server/middleware_test.go b/vendor/github.com/docker/docker/api/server/middleware_test.go new file mode 100644 index 00000000..4f48b209 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware_test.go @@ -0,0 +1,57 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/errors" + "golang.org/x/net/context" +) + +func TestVersionMiddleware(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + h := versionMiddleware(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + if err := h(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} + +func TestVersionMiddlewareWithErrors(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + h := versionMiddleware(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + vars := map[string]string{"version": "0.1"} + err := h(ctx, resp, req, vars) + if derr, ok := err.(errcode.Error); !ok || derr.ErrorCode() != errors.ErrorCodeOldClientVersion { + t.Fatalf("Expected ErrorCodeOldClientVersion, got %v", err) + } + + vars["version"] = "100000" + err = h(ctx, resp, req, vars) + if derr, ok := err.(errcode.Error); !ok || derr.ErrorCode() != errors.ErrorCodeNewerClientVersion { + t.Fatalf("Expected ErrorCodeNewerClientVersion, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/auth.go b/vendor/github.com/docker/docker/api/server/router/local/auth.go new file mode 100644 index 00000000..872124c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/auth.go @@ -0,0 +1,27 @@ +package local + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig" + "golang.org/x/net/context" +) + +func (s *router) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *cliconfig.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, err := s.daemon.AuthenticateToRegistry(config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ + Status: status, + }) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/container.go b/vendor/github.com/docker/docker/api/server/router/local/container.go new file mode 100644 index 00000000..bedcae01 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/container.go @@ -0,0 +1,468 @@ +package local + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "syscall" + "time" + + 
"github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *router) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + config := &daemon.ContainersConfig{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: r.Form.Get("filters"), + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.daemon.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *router) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + var out io.Writer + if !stream { + w.Header().Set("Content-Type", "application/json") + out = w + } else { + wf := ioutils.NewWriteFlusher(w) + out = wf + defer wf.Close() + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + config := &daemon.ContainerStatsConfig{ + Stream: stream, + OutStream: out, + Stop: closeNotifier, + Version: httputils.VersionFromContext(ctx), + } + + return s.daemon.ContainerStats(vars["name"], config) +} + +func (s *router) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. 
+ stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + var since time.Time + if r.Form.Get("since") != "" { + s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64) + if err != nil { + return err + } + since = time.Unix(s, 0) + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + containerName := vars["name"] + + if !s.daemon.Exists(containerName) { + return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) + } + + // write an empty chunk of data (this is to ensure that the + // HTTP Response is sent immediately, even if the container has + // not yet produced any data) + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + logsConfig := &daemon.ContainerLogsConfig{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: since, + Tail: r.Form.Get("tail"), + UseStdout: stdout, + UseStderr: stderr, + OutStream: output, + Stop: closeNotifier, + } + + if err := s.daemon.ContainerLogs(containerName, logsConfig); err != nil { + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. + fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %s\n", utils.GetErrorMessage(err)) + } + + return nil +} + +func (s *router) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.daemon.ContainerExport(vars["name"], w) +} + +func (s *router) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assume chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + var hostConfig *runconfig.HostConfig + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := runconfig.DecodeHostConfig(r.Body) + if err != nil { + return err + } + + hostConfig = c + } + + if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *router) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + seconds, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *router) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it.
Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.daemon.ContainerKill(name, uint64(sig)); err != nil { + theErr, isDerr := err.(errcode.ErrorCoder) + isStopped := isDerr && theErr.ErrorCode() == derr.ErrorCodeNotRunning + + // Return the error unless the container is already stopped and the + // client API version is below 1.20; such older clients expect killing + // a stopped container to succeed, so the error is suppressed for them + // to keep backwards compatibility. + version := httputils.VersionFromContext(ctx) + if version.GreaterThanOrEqualTo("1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *router) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + timeout, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *router) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.daemon.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *router) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.daemon.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *router) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ + StatusCode: status, + }) +} + +func (s *router) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.daemon.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *router) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *router) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.daemon.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *router) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name")
+ + config, hostConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := version.LessThan("1.19") + + ccr, err := s.daemon.ContainerCreate(&daemon.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *router) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &daemon.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.daemon.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such id: \"\"") + } + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *router) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.daemon.ContainerResize(vars["name"], height, width) +} + +func (s *router) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + if !s.daemon.Exists(containerName) { + return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) + } + + if s.daemon.IsPaused(containerName) { + return derr.ErrorCodePausedContainer.WithArgs(containerName) + } + + inStream, outStream, err := httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{ + InStream: inStream, + OutStream: outStream, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + } + + if err := s.daemon.ContainerAttachWithLogs(containerName, attachWithLogsConfig); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + + return nil +} + +func (s *router) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + if !s.daemon.Exists(containerName) { + return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + + wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{ + InStream: ws, + OutStream: ws, + ErrStream: ws, 
+ Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + } + + if err := s.daemon.ContainerWsAttachWithLogs(containerName, wsAttachWithLogsConfig); err != nil { + logrus.Errorf("Error attaching websocket: %s", err) + } + }) + ws := websocket.Server{Handler: h, Handshake: nil} + ws.ServeHTTP(w, r) + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/copy.go b/vendor/github.com/docker/docker/api/server/router/local/copy.go new file mode 100644 index 00000000..ff749a02 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/copy.go @@ -0,0 +1,112 @@ +package local + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *router) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such id") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// Encode the stat to JSON, base64 encode, and place in a header.
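+// For example, a stat marshals to JSON roughly like +// {"name":"hosts","size":174,"mode":420,"mtime":"2015-11-26T12:00:00Z","linkTarget":""} +// before being base64-encoded into the X-Docker-Container-Path-Stat header below.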
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *router) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.daemon.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *router) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.daemon.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *router) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.daemon.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/exec.go b/vendor/github.com/docker/docker/api/server/router/local/exec.go new file mode 100644 index 00000000..14095d8e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/exec.go @@ -0,0 +1,135 @@ +package local + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/runconfig" + "golang.org/x/net/context" +) + +func (s *router) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.daemon.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *router) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &runconfig.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + execConfig.Container = name + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.daemon.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %s", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
+func (s *router) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if version.GreaterThan("1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.daemon.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } else { + outStream = w + } + + // Now run the user process in container. + if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + logrus.Errorf("Error running exec in container: %v\n", err) + } + return nil +} + +func (s *router) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.daemon.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/image.go b/vendor/github.com/docker/docker/api/server/router/local/image.go new file mode 100644 index 00000000..fd78ee2c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/image.go @@ -0,0 +1,547 @@ +package local + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon/daemonbuilder" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/graph" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/ulimit" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + "golang.org/x/net/context" +) + +func (s *router) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if 
err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + pause = true + } + + c, _, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + + commitCfg := &dockerfile.CommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Changes: r.Form["changes"], + Config: c, + } + + if !s.daemon.Exists(cname) { + return derr.ErrorCodeNoSuchContainer.WithArgs(cname) + } + + imgID, err := dockerfile.Commit(cname, s.daemon, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &cliconfig.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &cliconfig.AuthConfig{} + } + } + + var ( + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + if tag == "" { + image, tag = parsers.ParseRepositoryTag(image) + } + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + imagePullConfig := &graph.ImagePullConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + OutStream: output, + } + + err = s.daemon.PullImage(image, tag, imagePullConfig) + } else { //import + if tag == "" { + repo, tag = parsers.ParseRepositoryTag(repo) + } + + src := r.Form.Get("fromSrc") + + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + var newConfig *runconfig.Config + newConfig, err = dockerfile.BuildFromConfig(&runconfig.Config{}, r.Form["changes"]) + if err != nil { + return err + } + + err = s.daemon.ImportImage(src, repo, tag, message, r.Body, output, newConfig) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *router) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &cliconfig.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded 
!= "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &cliconfig.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + name := vars["name"] + output := ioutils.NewWriteFlusher(w) + defer output.Close() + imagePushConfig := &graph.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + Tag: r.Form.Get("tag"), + OutStream: output, + } + + w.Header().Set("Content-Type", "application/json") + + if err := s.daemon.PushImage(name, imagePushConfig); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *router) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.daemon.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *router) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.daemon.LoadImage(r.Body, w) +} + +func (s *router) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.daemon.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *router) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.daemon.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]cliconfig.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + buildConfig = &dockerfile.Config{} + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. 
+ } + } + + w.Header().Set("Content-Type", "application/json") + + version := httputils.VersionFromContext(ctx) + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + // Do not write the error in the http output if it's still empty. + // This prevents writing a 200 (OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err)))) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { + buildConfig.Remove = true + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + buildConfig.Remove = true + } else { + buildConfig.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { + buildConfig.Pull = true + } + + repoAndTags, err := sanitizeRepoAndTags(r.Form["t"]) + if err != nil { + return errf(err) + } + + buildConfig.DockerfileName = r.FormValue("dockerfile") + buildConfig.Verbose = !httputils.BoolValue(r, "q") + buildConfig.UseCache = !httputils.BoolValue(r, "nocache") + buildConfig.ForceRemove = httputils.BoolValue(r, "forcerm") + buildConfig.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + buildConfig.Memory = httputils.Int64ValueOrZero(r, "memory") + buildConfig.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + buildConfig.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + buildConfig.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + buildConfig.CPUSetCpus = r.FormValue("cpusetcpus") + buildConfig.CPUSetMems = r.FormValue("cpusetmems") + buildConfig.CgroupParent = r.FormValue("cgroupparent") + + if i := runconfig.IsolationLevel(r.FormValue("isolation")); i != "" { + if !runconfig.IsolationLevel.IsValid(i) { + return errf(fmt.Errorf("Unsupported isolation: %q", i)) + } + buildConfig.Isolation = i + } + + var buildUlimits = []*ulimit.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { + return errf(err) + } + buildConfig.Ulimits = buildUlimits + } + + var buildArgs = map[string]string{} + buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { + return errf(err) + } + buildConfig.BuildArgs = buildArgs + } + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // The field `In` is set by DetectContextFromRemoteURL. + // Look at code in DetectContextFromRemoteURL for more information.
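+ // For illustration, with remote="https://example.com/ctx.tar.gz" (a made-up + // URL), the bytes read while fetching the context are reported back to the + // client as JSON progress messages under the "Downloading context" ID.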
+ pReader := &progressreader.Config{ + // TODO: make progressreader streamformatter-agnostic + Out: output, + Formatter: sf, + Size: r.ContentLength, + NewLines: true, + ID: "Downloading context", + Action: remoteURL, + } + + var ( + context builder.ModifiableContext + dockerfileName string + ) + context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, pReader) + if err != nil { + return errf(err) + } + defer func() { + if err := context.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + + uidMaps, gidMaps := s.daemon.GetUIDGIDMaps() + defaultArchiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + docker := &daemonbuilder.Docker{ + Daemon: s.daemon, + OutOld: output, + AuthConfigs: authConfigs, + Archiver: defaultArchiver, + } + + b, err := dockerfile.NewBuilder(buildConfig, docker, builder.DockerIgnoreContext{ModifiableContext: context}, nil) + if err != nil { + return errf(err) + } + b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf} + + if closeNotifier, ok := w.(http.CloseNotifier); ok { + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-finished: + case <-closeNotifier.CloseNotify(): + logrus.Infof("Client disconnected, cancelling job: build") + b.Cancel() + } + }() + } + + if len(dockerfileName) > 0 { + b.DockerfileName = dockerfileName + } + + imgID, err := b.Build() + if err != nil { + return errf(err) + } + + for _, rt := range repoAndTags { + if err := s.daemon.TagImage(rt.repo, rt.tag, string(imgID), true); err != nil { + return errf(err) + } + } + + return nil +} + +// repoAndTag is a helper struct for holding the parsed repositories and tags of +// the input "t" argument. +type repoAndTag struct { + repo, tag string +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]repoAndTag, error) { + var ( + repoAndTags []repoAndTag + // This map is used for deduplicating the "-t" parameter.
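+ // e.g. -t foo and -t foo:latest both normalize to "foo:latest", since a + // missing tag defaults to tags.DefaultTag, so only one entry is kept.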
+ uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + name, tag := parsers.ParseRepositoryTag(repo) + if name == "" { + continue + } + + if err := registry.ValidateRepositoryName(name); err != nil { + return nil, err + } + + nameWithTag := name + if len(tag) > 0 { + if err := tags.ValidateTagName(tag); err != nil { + return nil, err + } + nameWithTag += ":" + tag + } else { + nameWithTag += ":" + tags.DefaultTag + } + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, repoAndTag{repo: name, tag: tag}) + } + } + return repoAndTags, nil +} + +func (s *router) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // FIXME: The filter parameter could just be a match filter + images, err := s.daemon.ListImages(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *router) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.daemon.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *router) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + repo := r.Form.Get("repo") + tag := r.Form.Get("tag") + name := vars["name"] + force := httputils.BoolValue(r, "force") + if err := s.daemon.TagImage(repo, tag, name, force); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *router) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *cliconfig.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &cliconfig.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + query, err := s.daemon.SearchRegistryForImages(r.Form.Get("term"), config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/info.go b/vendor/github.com/docker/docker/api/server/router/local/info.go new file mode 100644 index 00000000..06228a50 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/info.go @@ -0,0 +1,141 @@ +package local + +import ( + "encoding/json" + "net/http" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/parsers/filters" + 
"github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/utils" + "golang.org/x/net/context" +) + +func (s *router) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v := &types.Version{ + Version: dockerversion.Version, + APIVersion: api.Version, + GitCommit: dockerversion.GitCommit, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + } + + version := httputils.VersionFromContext(ctx) + + if version.GreaterThanOrEqualTo("1.19") { + v.Experimental = utils.ExperimentalBuild() + } + + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + v.KernelVersion = kernelVersion.String() + } + + return httputils.WriteJSON(w, http.StatusOK, v) +} + +func (s *router) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.daemon.SystemInfo() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *router) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + since, err := httputils.Int64ValueOrDefault(r, "since", -1) + if err != nil { + return err + } + until, err := httputils.Int64ValueOrDefault(r, "until", -1) + if err != nil { + return err + } + + timer := time.NewTimer(0) + timer.Stop() + if until > 0 { + dur := time.Unix(until, 0).Sub(time.Now()) + timer = time.NewTimer(dur) + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + + // This is to ensure that the HTTP status code is sent immediately, + // so that it will not block the receiver. + w.WriteHeader(http.StatusOK) + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + enc := json.NewEncoder(output) + + current, l, cancel := s.daemon.SubscribeToEvents() + defer cancel() + + eventFilter := s.daemon.GetEventFilter(ef) + handleEvent := func(ev *jsonmessage.JSONMessage) error { + if eventFilter.Include(ev) { + if err := enc.Encode(ev); err != nil { + return err + } + } + return nil + } + + if since == -1 { + current = nil + } + for _, ev := range current { + if ev.Time < since { + continue + } + if err := handleEvent(ev); err != nil { + return err + } + } + + var closeNotify <-chan bool + if closeNotifier, ok := w.(http.CloseNotifier); ok { + closeNotify = closeNotifier.CloseNotify() + } + + for { + select { + case ev := <-l: + jev, ok := ev.(*jsonmessage.JSONMessage) + if !ok { + continue + } + if err := handleEvent(jev); err != nil { + return err + } + case <-timer.C: + return nil + case <-closeNotify: + logrus.Debug("Client disconnected, stop sending events") + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/inspect.go b/vendor/github.com/docker/docker/api/server/router/local/inspect.go new file mode 100644 index 00000000..8b2a058e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/inspect.go @@ -0,0 +1,33 @@ +package local + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects containers configuration and serializes it as json. 
+func (s *router) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + var json interface{} + var err error + + version := httputils.VersionFromContext(ctx) + + switch { + case version.LessThan("1.20"): + json, err = s.daemon.ContainerInspectPre120(vars["name"]) + case version.Equal("1.20"): + json, err = s.daemon.ContainerInspect120(vars["name"]) + default: + json, err = s.daemon.ContainerInspect(vars["name"], displaySize) + } + + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local/local.go b/vendor/github.com/docker/docker/api/server/router/local/local.go new file mode 100644 index 00000000..b27031bd --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local/local.go @@ -0,0 +1,158 @@ +package local + +import ( + "net/http" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/server/httputils" + dkrouter "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon" +) + +// router is a docker router that talks with the local docker daemon. +type router struct { + daemon *daemon.Daemon + routes []dkrouter.Route +} + +// localRoute defines an individual API route to connect with the docker daemon. +// It implements router.Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) dkrouter.Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS +func NewOptionsRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. +func NewHeadRoute(path string, handler httputils.APIFunc) dkrouter.Route { + return NewRoute("HEAD", path, handler) +} + +// NewRouter initializes a local router with a new daemon. +func NewRouter(daemon *daemon.Daemon) dkrouter.Router { + r := &router{ + daemon: daemon, + } + r.initRoutes() + return r +} + +// Routes returns the list of routes registered in the router.
+func (r *router) Routes() []dkrouter.Route { + return r.routes +} + +// initRoutes initializes the routes in this router +func (r *router) initRoutes() { + r.routes = []dkrouter.Route{ + // HEAD + NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // OPTIONS + NewOptionsRoute("/", optionsHandler), + // GET + NewGetRoute("/_ping", pingHandler), + NewGetRoute("/events", r.getEvents), + NewGetRoute("/info", r.getInfo), + NewGetRoute("/version", r.getVersion), + NewGetRoute("/images/json", r.getImagesJSON), + NewGetRoute("/images/search", r.getImagesSearch), + NewGetRoute("/images/get", r.getImagesGet), + NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + NewGetRoute("/containers/json", r.getContainersJSON), + NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs), + NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats), + NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + NewPostRoute("/auth", r.postAuth), + NewPostRoute("/commit", r.postCommit), + NewPostRoute("/build", r.postBuild), + NewPostRoute("/images/create", r.postImagesCreate), + NewPostRoute("/images/load", r.postImagesLoad), + NewPostRoute("/images/{name:.*}/push", r.postImagesPush), + NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + NewPostRoute("/containers/create", r.postContainersCreate), + NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), + NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + // PUT + NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/backend.go 
b/vendor/github.com/docker/docker/api/server/router/network/backend.go new file mode 100644 index 00000000..8e069a4c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/backend.go @@ -0,0 +1,23 @@ +package network + +import ( + // TODO: network config needs to be refactored out to a + // different location + "github.com/docker/docker/daemon/network" + + "github.com/docker/libnetwork" +) + +// Backend is all the methods that need to be implemented to provide +// network specific functionality +type Backend interface { + FindNetwork(idName string) (libnetwork.Network, error) + GetNetwork(idName string, by int) (libnetwork.Network, error) + GetNetworksByID(partialID string) []libnetwork.Network + CreateNetwork(name, driver string, ipam network.IPAM, + options map[string]string) (libnetwork.Network, error) + ConnectContainerToNetwork(containerName, networkName string) error + DisconnectContainerFromNetwork(containerName string, + network libnetwork.Network) error + NetworkControllerEnabled() bool +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network.go b/vendor/github.com/docker/docker/api/server/router/network/network.go new file mode 100644 index 00000000..5fcb252c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/network.go @@ -0,0 +1,56 @@ +package network + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/local" + "github.com/docker/docker/errors" + "golang.org/x/net/context" +) + +// networkRouter is a router to talk with the network controller +type networkRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new network router +func NewRouter(b Backend) router.Router { + r := &networkRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the network controller +func (r *networkRouter) Routes() []router.Route { + return r.routes +} + +func (r *networkRouter) initRoutes() { + r.routes = []router.Route{ + // GET + local.NewGetRoute("/networks", r.controllerEnabledMiddleware(r.getNetworksList)), + local.NewGetRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.getNetwork)), + // POST + local.NewPostRoute("/networks/create", r.controllerEnabledMiddleware(r.postNetworkCreate)), + local.NewPostRoute("/networks/{id:.*}/connect", r.controllerEnabledMiddleware(r.postNetworkConnect)), + local.NewPostRoute("/networks/{id:.*}/disconnect", r.controllerEnabledMiddleware(r.postNetworkDisconnect)), + // DELETE + local.NewDeleteRoute("/networks/{id:.*}", r.controllerEnabledMiddleware(r.deleteNetwork)), + } +} + +func (r *networkRouter) controllerEnabledMiddleware(handler httputils.APIFunc) httputils.APIFunc { + if r.backend.NetworkControllerEnabled() { + return handler + } + return networkControllerDisabled +} + +func networkControllerDisabled(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return errors.ErrorNetworkControllerNotEnabled.WithArgs() +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network_routes.go b/vendor/github.com/docker/docker/api/server/router/network/network_routes.go new file mode 100644 index 00000000..e4b5d740 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/network_routes.go @@ -0,0 +1,258 @@ +package network + +import ( + "encoding/json" + "fmt" + "net/http" + + "golang.org/x/net/context" + 
+ "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" +) + +func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + filter := r.Form.Get("filters") + netFilters, err := filters.FromParam(filter) + if err != nil { + return err + } + + list := []*types.NetworkResource{} + var nameFilter, idFilter bool + var names, ids []string + if names, nameFilter = netFilters["name"]; nameFilter { + for _, name := range names { + if nw, err := n.backend.GetNetwork(name, daemon.NetworkByName); err == nil { + list = append(list, buildNetworkResource(nw)) + } else { + logrus.Errorf("failed to get network for filter=%s : %v", name, err) + } + } + } + + if ids, idFilter = netFilters["id"]; idFilter { + for _, id := range ids { + for _, nw := range n.backend.GetNetworksByID(id) { + list = append(list, buildNetworkResource(nw)) + } + } + } + + if !nameFilter && !idFilter { + nwList := n.backend.GetNetworksByID("") + for _, nw := range nwList { + list = append(list, buildNetworkResource(nw)) + } + } + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, buildNetworkResource(nw)) +} + +func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var create types.NetworkCreate + var warning string + + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&create); err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(create.Name) { + return httputils.WriteJSON(w, http.StatusForbidden, + fmt.Sprintf("%s is a pre-defined network and cannot be created", create.Name)) + } + + nw, err := n.backend.GetNetwork(create.Name, daemon.NetworkByName) + if _, ok := err.(libnetwork.ErrNoSuchNetwork); err != nil && !ok { + return err + } + if nw != nil { + if create.CheckDuplicate { + return libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + nw, err = n.backend.CreateNetwork(create.Name, create.Driver, create.IPAM, create.Options) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.NetworkCreateResponse{ + ID: nw.ID(), + Warning: warning, + }) +} + +func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var connect types.NetworkConnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + return err + } + + return 
n.backend.ConnectContainerToNetwork(connect.Container, nw.Name()) +} + +func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var disconnect types.NetworkDisconnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(nw.Name()) { + return httputils.WriteJSON(w, http.StatusForbidden, + fmt.Sprintf("%s is a pre-defined network and cannot be removed", nw.Name())) + } + + return nw.Delete() +} + +func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + r.Name = nw.Name() + r.ID = nw.ID() + r.Scope = nw.Info().Scope() + r.Driver = nw.Type() + r.Options = nw.Info().DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, nw) + + epl := nw.Endpoints() + for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + if sb == nil { + continue + } + + r.Containers[sb.ContainerID()] = buildEndpointResource(e) + } + return r +} + +func buildIpamResources(r *types.NetworkResource, nw libnetwork.Network) { + id, ipv4conf, ipv6conf := nw.Info().IpamConfig() + + r.IPAM.Driver = id + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + for _, ip6 := range ipv6conf { + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } +} + +func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource { + er := types.EndpointResource{} + if e == nil { + return er + } + + er.EndpointID = e.ID() + ei := e.Info() + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} diff --git a/vendor/github.com/docker/docker/api/server/router/router.go b/vendor/github.com/docker/docker/api/server/router/router.go new file mode 100644 index 00000000..f3efa82f --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/router.go @@ -0,0 +1,18 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + Routes() []Route +} + +// Route defines an individual API route in the docker server.
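+// For example, local.NewGetRoute("/containers/json", handler) produces a +// Route whose Method() is "GET" and whose Path() is "/containers/json".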
+type Route interface {
+	// Handler returns the raw function to create the http handler.
+	Handler() httputils.APIFunc
+	// Method returns the http method that the route responds to.
+	Method() string
+	// Path returns the subpath to which the route responds.
+	Path() string
+}
diff --git a/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/vendor/github.com/docker/docker/api/server/router/volume/backend.go
new file mode 100644
index 00000000..aa69972c
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/router/volume/backend.go
@@ -0,0 +1,16 @@
+package volume

+import (
+	// TODO return types need to be refactored into pkg
+	"github.com/docker/docker/api/types"
+)
+
+// Backend defines the methods that need to be implemented to provide
+// volume-specific functionality
+type Backend interface {
+	Volumes(filter string) ([]*types.Volume, error)
+	VolumeInspect(name string) (*types.Volume, error)
+	VolumeCreate(name, driverName string,
+		opts map[string]string) (*types.Volume, error)
+	VolumeRm(name string) error
+}
diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/vendor/github.com/docker/docker/api/server/router/volume/volume.go
new file mode 100644
index 00000000..8bd5c129
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/router/volume/volume.go
@@ -0,0 +1,38 @@
+package volume
+
+import (
+	"github.com/docker/docker/api/server/router"
+	"github.com/docker/docker/api/server/router/local"
+)
+
+// volumeRouter is a router to talk with the volumes controller
+type volumeRouter struct {
+	backend Backend
+	routes  []router.Route
+}
+
+// NewRouter initializes a new volumeRouter
+func NewRouter(b Backend) router.Router {
+	r := &volumeRouter{
+		backend: b,
+	}
+	r.initRoutes()
+	return r
+}
+
+// Routes returns the available routes to the volumes controller
+func (r *volumeRouter) Routes() []router.Route {
+	return r.routes
+}
+
+func (r *volumeRouter) initRoutes() {
+	r.routes = []router.Route{
+		// GET
+		local.NewGetRoute("/volumes", r.getVolumesList),
+		local.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
+		// POST
+		local.NewPostRoute("/volumes/create", r.postVolumesCreate),
+		// DELETE
+		local.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
+	}
+}
diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go
new file mode 100644
index 00000000..5b0787c5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go
@@ -0,0 +1,66 @@
+package volume
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	volumes, err := v.backend.Volumes(r.Form.Get("filters"))
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes})
+}
+
+func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	volume, err := v.backend.VolumeInspect(vars["name"])
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, volume)
+}
+
+func (v *volumeRouter) 
postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	var req types.VolumeCreateRequest
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return err
+	}
+
+	volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusCreated, volume)
+}
+
+func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+	if err := v.backend.VolumeRm(vars["name"]); err != nil {
+		return err
+	}
+	w.WriteHeader(http.StatusNoContent)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/api/server/server.go b/vendor/github.com/docker/docker/api/server/server.go
index 19ebc550..b90d704c 100644
--- a/vendor/github.com/docker/docker/api/server/server.go
+++ b/vendor/github.com/docker/docker/api/server/server.go
@@ -2,44 +2,28 @@ package server
 
 import (
 	"crypto/tls"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
 	"net"
 	"net/http"
 	"os"
-	"runtime"
-	"strconv"
 	"strings"
-	"time"
-
-	"github.com/gorilla/mux"
-	"golang.org/x/net/websocket"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/server/router"
+	"github.com/docker/docker/api/server/router/local"
+	"github.com/docker/docker/api/server/router/network"
+	"github.com/docker/docker/api/server/router/volume"
 	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/graph"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/jsonmessage"
-	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/parsers/filters"
-	"github.com/docker/docker/pkg/parsers/kernel"
-	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/sockets"
-	"github.com/docker/docker/pkg/stdcopy"
-	"github.com/docker/docker/pkg/streamformatter"
-	"github.com/docker/docker/pkg/ulimit"
-	"github.com/docker/docker/pkg/version"
-	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
+	"github.com/gorilla/mux"
+	"golang.org/x/net/context"
 )
 
+// versionMatcher defines a variable matcher to be parsed by the router
+// when a request is about to be served.
+const versionMatcher = "/v{version:[0-9.]+}"
+
 // Config provides the configuration for the API server
 type Config struct {
 	Logging     bool
@@ -48,26 +32,39 @@ type Config struct {
 	Version     string
 	SocketGroup string
 	TLSConfig   *tls.Config
+	Addrs       []Addr
 }
 
 // Server contains instance details for the server
 type Server struct {
-	daemon  *daemon.Daemon
 	cfg     *Config
-	router  *mux.Router
 	start   chan struct{}
-	servers []serverCloser
+	servers []*HTTPServer
+	routers []router.Router
+}
+
+// Addr contains a string representation of an address and its protocol (tcp, unix...).
+type Addr struct {
+	Proto string
+	Addr  string
 }
 
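Taken together with New, InitRouters, ServeAPI and AcceptConnections in the hunks that follow, the intended wiring from a daemon looks roughly like this. A sketch, not a verbatim caller from the patch: it assumes d is an initialized *daemon.Daemon, the server package qualifier, and that the error from ServeAPI is allowed to be dropped.

	cfg := &server.Config{
		Logging: true,
		Version: "1.21",
		Addrs: []server.Addr{
			{Proto: "unix", Addr: "/var/run/docker.sock"},
			{Proto: "tcp", Addr: "127.0.0.1:2375"},
		},
	}
	api, err := server.New(cfg) // listeners for every Addr are allocated here
	if err != nil {
		logrus.Fatal(err)
	}
	api.InitRouters(d)      // build the mux from the local, network and volume routers
	go api.ServeAPI()       // one goroutine per configured server
	api.AcceptConnections() // unblock the listeners

 // New returns a new instance of the server based on the specified configuration.
-func New(cfg *Config) *Server {
-	srv := &Server{
+// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).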
+func New(cfg *Config) (*Server, error) { + s := &Server{ cfg: cfg, start: make(chan struct{}), } - r := createRouter(srv) - srv.router = r - return srv + for _, addr := range cfg.Addrs { + srv, err := s.newServer(addr.Proto, addr.Addr) + if err != nil { + return nil, err + } + logrus.Debugf("Server created for HTTP on %s (%s)", addr.Proto, addr.Addr) + s.servers = append(s.servers, srv...) + } + return s, nil } // Close closes servers and thus stop receiving requests @@ -79,39 +76,22 @@ func (s *Server) Close() { } } -type serverCloser interface { - Serve() error - Close() error -} - -// ServeAPI loops through all of the protocols sent in to docker and spawns -// off a go routine to setup a serving http.Server for each. -func (s *Server) ServeAPI(protoAddrs []string) error { - var chErrors = make(chan error, len(protoAddrs)) - - for _, protoAddr := range protoAddrs { - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - if len(protoAddrParts) != 2 { - return fmt.Errorf("bad format, expected PROTO://ADDR") - } - srv, err := s.newServer(protoAddrParts[0], protoAddrParts[1]) - if err != nil { - return err - } - s.servers = append(s.servers, srv...) - - for _, s := range srv { - logrus.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - go func(s serverCloser) { - if err := s.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { - err = nil - } - chErrors <- err - }(s) - } +// ServeAPI loops through all initialized servers and spawns goroutine +// with Serve() method for each. +func (s *Server) ServeAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) } - for i := 0; i < len(protoAddrs); i++ { + for i := 0; i < len(s.servers); i++ { err := <-chErrors if err != nil { return err @@ -139,1430 +119,11 @@ func (s *HTTPServer) Close() error { return s.l.Close() } -// HTTPAPIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. -// Any function that has the appropriate signature can be register as a API endpoint (e.g. getVersion). -type HTTPAPIFunc func(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error - -func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - return nil, nil, err - } - // Flush the options to make sure the client sets the raw mode - conn.Write([]byte{}) - return conn, conn, nil -} - -func closeStreams(streams ...interface{}) { - for _, stream := range streams { - if tcpc, ok := stream.(interface { - CloseWrite() error - }); ok { - tcpc.CloseWrite() - } else if closer, ok := stream.(io.Closer); ok { - closer.Close() - } - } -} - -// checkForJSON makes sure that the request's Content-Type is application/json. 
-func checkForJSON(r *http.Request) error { - ct := r.Header.Get("Content-Type") - - // No Content-Type header is ok as long as there's no Body - if ct == "" { - if r.Body == nil || r.ContentLength == 0 { - return nil - } - } - - // Otherwise it better be json - if api.MatchesContentType(ct, "application/json") { - return nil - } - return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) -} - -//If we don't do this, POST method without Content-type (even with empty body) will fail -func parseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func parseMultipartForm(r *http.Request) error { - if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func httpError(w http.ResponseWriter, err error) { - if err == nil || w == nil { - logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") - return - } - statusCode := http.StatusInternalServerError - // FIXME: this is brittle and should not be necessary. - // If we need to differentiate between different possible error types, we should - // create appropriate error types with clearly defined meaning. - errStr := strings.ToLower(err.Error()) - for keyword, status := range map[string]int{ - "not found": http.StatusNotFound, - "no such": http.StatusNotFound, - "bad parameter": http.StatusBadRequest, - "conflict": http.StatusConflict, - "impossible": http.StatusNotAcceptable, - "wrong login/password": http.StatusUnauthorized, - "hasn't been activated": http.StatusForbidden, - } { - if strings.Contains(errStr, keyword) { - statusCode = status - break - } - } - - logrus.WithFields(logrus.Fields{"statusCode": statusCode, "err": err}).Error("HTTP Error") - http.Error(w, err.Error(), statusCode) -} - -// writeJSON writes the value v to the http response stream as json with standard -// json encoding. 
-func writeJSON(w http.ResponseWriter, code int, v interface{}) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return json.NewEncoder(w).Encode(v) -} - -func (s *Server) postAuth(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config *cliconfig.AuthConfig - err := json.NewDecoder(r.Body).Decode(&config) - r.Body.Close() - if err != nil { - return err - } - status, err := s.daemon.RegistryService.Auth(config) - if err != nil { - return err - } - return writeJSON(w, http.StatusOK, &types.AuthResponse{ - Status: status, - }) -} - -func (s *Server) getVersion(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v := &types.Version{ - Version: dockerversion.VERSION, - ApiVersion: api.Version, - GitCommit: dockerversion.GITCOMMIT, - GoVersion: runtime.Version(), - Os: runtime.GOOS, - Arch: runtime.GOARCH, - BuildTime: dockerversion.BUILDTIME, - } - - if version.GreaterThanOrEqualTo("1.19") { - v.Experimental = utils.ExperimentalBuild() - } - - if kernelVersion, err := kernel.GetKernelVersion(); err == nil { - v.KernelVersion = kernelVersion.String() - } - - return writeJSON(w, http.StatusOK, v) -} - -func (s *Server) postContainersKill(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - - var sig uint64 - name := vars["name"] - - // If we have a signal, look at it. Otherwise, do nothing - if sigStr := r.Form.Get("signal"); sigStr != "" { - // Check if we passed the signal as a number: - // The largest legal signal is 31, so let's parse on 5 bits - sigN, err := strconv.ParseUint(sigStr, 10, 5) - if err != nil { - // The signal is not a number, treat it as a string (either like - // "KILL" or like "SIGKILL") - syscallSig, ok := signal.SignalMap[strings.TrimPrefix(sigStr, "SIG")] - if !ok { - return fmt.Errorf("Invalid signal: %s", sigStr) - } - sig = uint64(syscallSig) - } else { - sig = sigN - } - - if sig == 0 { - return fmt.Errorf("Invalid signal: %s", sigStr) - } - } - - if err := s.daemon.ContainerKill(name, sig); err != nil { - _, isStopped := err.(daemon.ErrContainerNotRunning) - // Return error that's not caused because the container is stopped. - // Return error if the container is not running and the api is >= 1.20 - // to keep backwards compatibility. 
- if version.GreaterThanOrEqualTo("1.20") || !isStopped { - return fmt.Errorf("Cannot kill container %s: %v", name, err) - } - } - - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *Server) postContainersPause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - - if err := s.daemon.ContainerPause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *Server) postContainersUnpause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - - if err := s.daemon.ContainerUnpause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *Server) getContainersExport(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - return s.daemon.ContainerExport(vars["name"], w) -} - -func (s *Server) getImagesJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - imagesConfig := graph.ImagesConfig{ - Filters: r.Form.Get("filters"), - // FIXME this parameter could just be a match filter - Filter: r.Form.Get("filter"), - All: boolValue(r, "all"), - } - - images, err := s.daemon.Repositories().Images(&imagesConfig) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, images) -} - -func (s *Server) getInfo(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info, err := s.daemon.SystemInfo() - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, info) -} - -func (s *Server) getEvents(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var since int64 = -1 - if r.Form.Get("since") != "" { - s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64) - if err != nil { - return err - } - since = s - } - - var until int64 = -1 - if r.Form.Get("until") != "" { - u, err := strconv.ParseInt(r.Form.Get("until"), 10, 64) - if err != nil { - return err - } - until = u - } - - timer := time.NewTimer(0) - timer.Stop() - if until > 0 { - dur := time.Unix(until, 0).Sub(time.Now()) - timer = time.NewTimer(dur) - } - - ef, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - isFiltered := func(field string, filter []string) bool { - if len(field) == 0 { - return false - } - if len(filter) == 0 { - return false - } - for _, v := range filter { - if v == field { - return false - } - if strings.Contains(field, ":") { - image := strings.Split(field, ":") - if image[0] == v { - return false - } - } - } - return true - } - - d := s.daemon - es := d.EventsService - w.Header().Set("Content-Type", "application/json") - outStream := ioutils.NewWriteFlusher(w) - outStream.Write(nil) // make sure response is sent immediately - enc := json.NewEncoder(outStream) - - getContainerID := func(cn string) string { - c, err := d.Get(cn) - if err != nil { - return "" - } - return c.ID - } - - sendEvent := func(ev *jsonmessage.JSONMessage) error { - //incoming container 
filter can be name,id or partial id, convert and replace as a full container id - for i, cn := range ef["container"] { - ef["container"][i] = getContainerID(cn) - } - - if isFiltered(ev.Status, ef["event"]) || (isFiltered(ev.ID, ef["image"]) && - isFiltered(ev.From, ef["image"])) || isFiltered(ev.ID, ef["container"]) { - return nil - } - - return enc.Encode(ev) - } - - current, l := es.Subscribe() - if since == -1 { - current = nil - } - defer es.Evict(l) - for _, ev := range current { - if ev.Time < since { - continue - } - if err := sendEvent(ev); err != nil { - return err - } - } - - var closeNotify <-chan bool - if closeNotifier, ok := w.(http.CloseNotifier); ok { - closeNotify = closeNotifier.CloseNotify() - } - - for { - select { - case ev := <-l: - jev, ok := ev.(*jsonmessage.JSONMessage) - if !ok { - continue - } - if err := sendEvent(jev); err != nil { - return err - } - case <-timer.C: - return nil - case <-closeNotify: - logrus.Debug("Client disconnected, stop sending events") - return nil - } - } -} - -func (s *Server) getImagesHistory(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - name := vars["name"] - history, err := s.daemon.Repositories().History(name) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, history) -} - -func (s *Server) getContainersChanges(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - changes, err := s.daemon.ContainerChanges(vars["name"]) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, changes) -} - -func (s *Server) getContainersTop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - if err := parseForm(r); err != nil { - return err - } - - procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args")) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, procList) -} - -func (s *Server) getContainersJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - config := &daemon.ContainersConfig{ - All: boolValue(r, "all"), - Size: boolValue(r, "size"), - Since: r.Form.Get("since"), - Before: r.Form.Get("before"), - Filters: r.Form.Get("filters"), - } - - if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { - limit, err := strconv.Atoi(tmpLimit) - if err != nil { - return err - } - config.Limit = limit - } - - containers, err := s.daemon.Containers(config) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, containers) -} - -func (s *Server) getContainersStats(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - stream := boolValueOrDefault(r, "stream", true) - var out io.Writer - if !stream { - w.Header().Set("Content-Type", "application/json") - out = w - } else { - out = ioutils.NewWriteFlusher(w) - } - - var closeNotifier <-chan bool - if notifier, ok := w.(http.CloseNotifier); ok { - closeNotifier = notifier.CloseNotify() - } - - config := &daemon.ContainerStatsConfig{ - Stream: stream, - OutStream: out, - Stop: closeNotifier, - } - - return 
s.daemon.ContainerStats(vars["name"], config) -} - -func (s *Server) getContainersLogs(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - // Validate args here, because we can't return not StatusOK after job.Run() call - stdout, stderr := boolValue(r, "stdout"), boolValue(r, "stderr") - if !(stdout || stderr) { - return fmt.Errorf("Bad parameters: you must choose at least one stream") - } - - var since time.Time - if r.Form.Get("since") != "" { - s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64) - if err != nil { - return err - } - since = time.Unix(s, 0) - } - - var closeNotifier <-chan bool - if notifier, ok := w.(http.CloseNotifier); ok { - closeNotifier = notifier.CloseNotify() - } - - c, err := s.daemon.Get(vars["name"]) - if err != nil { - return err - } - - outStream := ioutils.NewWriteFlusher(w) - // write an empty chunk of data (this is to ensure that the - // HTTP Response is sent immediatly, even if the container has - // not yet produced any data) - outStream.Write(nil) - - logsConfig := &daemon.ContainerLogsConfig{ - Follow: boolValue(r, "follow"), - Timestamps: boolValue(r, "timestamps"), - Since: since, - Tail: r.Form.Get("tail"), - UseStdout: stdout, - UseStderr: stderr, - OutStream: outStream, - Stop: closeNotifier, - } - - if err := s.daemon.ContainerLogs(c, logsConfig); err != nil { - fmt.Fprintf(w, "Error running logs job: %s\n", err) - } - - return nil -} - -func (s *Server) postImagesTag(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - repo := r.Form.Get("repo") - tag := r.Form.Get("tag") - force := boolValue(r, "force") - name := vars["name"] - if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil { - return err - } - s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "") - w.WriteHeader(http.StatusCreated) - return nil -} - -func (s *Server) postCommit(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - if err := checkForJSON(r); err != nil { - return err - } - - cname := r.Form.Get("container") - - pause := boolValue(r, "pause") - if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { - pause = true - } - - c, _, err := runconfig.DecodeContainerConfig(r.Body) - if err != nil && err != io.EOF { //Do not fail if body is empty. 
- return err - } - - commitCfg := &builder.CommitConfig{ - Pause: pause, - Repo: r.Form.Get("repo"), - Tag: r.Form.Get("tag"), - Author: r.Form.Get("author"), - Comment: r.Form.Get("comment"), - Changes: r.Form["changes"], - Config: c, - } - - imgID, err := builder.Commit(cname, s.daemon, commitCfg) - if err != nil { - return err - } - - return writeJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ - ID: imgID, - }) -} - -// Creates an image from Pull or from Import -func (s *Server) postImagesCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - repo = r.Form.Get("repo") - tag = r.Form.Get("tag") - ) - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &cliconfig.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &cliconfig.AuthConfig{} - } - } - - var ( - err error - output = ioutils.NewWriteFlusher(w) - ) - - w.Header().Set("Content-Type", "application/json") - - if image != "" { //pull - if tag == "" { - image, tag = parsers.ParseRepositoryTag(image) - } - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - imagePullConfig := &graph.ImagePullConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - OutStream: output, - } - - err = s.daemon.Repositories().Pull(image, tag, imagePullConfig) - } else { //import - if tag == "" { - repo, tag = parsers.ParseRepositoryTag(repo) - } - - src := r.Form.Get("fromSrc") - imageImportConfig := &graph.ImageImportConfig{ - Changes: r.Form["changes"], - InConfig: r.Body, - OutStream: output, - } - - // 'err' MUST NOT be defined within this block, we need any error - // generated from the download to be available to the output - // stream processing below - var newConfig *runconfig.Config - newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes) - if err != nil { - return err - } - imageImportConfig.ContainerConfig = newConfig - - err = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig) - } - if err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - - return nil - -} - -func (s *Server) getImagesSearch(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - config *cliconfig.AuthConfig - authEncoded = r.Header.Get("X-Registry-Auth") - headers = map[string][]string{} - ) - - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(&config); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - config = &cliconfig.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - headers[k] = v - } - } - query, err := s.daemon.RegistryService.Search(r.Form.Get("term"), config, headers) - if err != nil { - return err - } - return 
writeJSON(w, http.StatusOK, query.Results) -} - -func (s *Server) postImagesPush(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := parseForm(r); err != nil { - return err - } - authConfig := &cliconfig.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = &cliconfig.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) - } - } - - name := vars["name"] - output := ioutils.NewWriteFlusher(w) - imagePushConfig := &graph.ImagePushConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - Tag: r.Form.Get("tag"), - OutStream: output, - } - - w.Header().Set("Content-Type", "application/json") - - if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil - -} - -func (s *Server) getImagesGet(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - - output := ioutils.NewWriteFlusher(w) - imageExportConfig := &graph.ImageExportConfig{Outstream: output} - if name, ok := vars["name"]; ok { - imageExportConfig.Names = []string{name} - } else { - imageExportConfig.Names = r.Form["names"] - } - - if err := s.daemon.Repositories().ImageExport(imageExportConfig); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil - -} - -func (s *Server) postImagesLoad(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return s.daemon.Repositories().Load(r.Body, w) -} - -func (s *Server) postContainersCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if err := checkForJSON(r); err != nil { - return err - } - var ( - warnings []string - name = r.Form.Get("name") - ) - - config, hostConfig, err := runconfig.DecodeContainerConfig(r.Body) - if err != nil { - return err - } - adjustCPUShares(version, hostConfig) - - containerID, warnings, err := s.daemon.ContainerCreate(name, config, hostConfig) - if err != nil { - return err - } - - return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{ - ID: containerID, - Warnings: warnings, - }) -} - -func (s *Server) postContainersRestart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing 
parameter") - } - - timeout, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *Server) postContainerRename(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - name := vars["name"] - newName := r.Form.Get("name") - if err := s.daemon.ContainerRename(name, newName); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *Server) deleteContainers(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - name := vars["name"] - config := &daemon.ContainerRmConfig{ - ForceRemove: boolValue(r, "force"), - RemoveVolume: boolValue(r, "v"), - RemoveLink: boolValue(r, "link"), - } - - if err := s.daemon.ContainerRm(name, config); err != nil { - // Force a 404 for the empty string - if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { - return fmt.Errorf("no such id: \"\"") - } - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *Server) deleteImages(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - name := vars["name"] - force := boolValue(r, "force") - noprune := boolValue(r, "noprune") - - list, err := s.daemon.ImageDelete(name, force, noprune) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, list) -} - -func (s *Server) postContainersStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - // If contentLength is -1, we can assumed chunked encoding - // or more technically that the length is unknown - // https://golang.org/src/pkg/net/http/request.go#L139 - // net/http otherwise seems to swallow any headers related to chunked encoding - // including r.TransferEncoding - // allow a nil body for backwards compatibility - var hostConfig *runconfig.HostConfig - if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { - if err := checkForJSON(r); err != nil { - return err - } - - c, err := runconfig.DecodeHostConfig(r.Body) - if err != nil { - return err - } - - hostConfig = c - } - - if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil { - if err.Error() == "Container already started" { - w.WriteHeader(http.StatusNotModified) - return nil - } - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *Server) postContainersStop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - seconds, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil { - if err.Error() == "Container already stopped" { - w.WriteHeader(http.StatusNotModified) - return nil - } - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *Server) 
postContainersWait(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, &types.ContainerWaitResponse{ - StatusCode: status, - }) -} - -func (s *Server) postContainersResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.daemon.ContainerResize(vars["name"], height, width) -} - -func (s *Server) postContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - cont, err := s.daemon.Get(vars["name"]) - if err != nil { - return err - } - - inStream, outStream, err := hijackServer(w) - if err != nil { - return err - } - defer closeStreams(inStream, outStream) - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{ - InStream: inStream, - OutStream: outStream, - UseStdin: boolValue(r, "stdin"), - UseStdout: boolValue(r, "stdout"), - UseStderr: boolValue(r, "stderr"), - Logs: boolValue(r, "logs"), - Stream: boolValue(r, "stream"), - } - - if err := s.daemon.ContainerAttachWithLogs(cont, attachWithLogsConfig); err != nil { - fmt.Fprintf(outStream, "Error attaching: %s\n", err) - } - - return nil -} - -func (s *Server) wsContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - cont, err := s.daemon.Get(vars["name"]) - if err != nil { - return err - } - - h := websocket.Handler(func(ws *websocket.Conn) { - defer ws.Close() - - wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{ - InStream: ws, - OutStream: ws, - ErrStream: ws, - Logs: boolValue(r, "logs"), - Stream: boolValue(r, "stream"), - } - - if err := s.daemon.ContainerWsAttachWithLogs(cont, wsAttachWithLogsConfig); err != nil { - logrus.Errorf("Error attaching websocket: %s", err) - } - }) - h.ServeHTTP(w, r) - - return nil -} - -func (s *Server) getContainersByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - if version.LessThan("1.20") && runtime.GOOS != "windows" { - return getContainersByNameDownlevel(w, s, vars["name"]) - } - - containerJSON, err := s.daemon.ContainerInspect(vars["name"]) - if err != nil { - return err - } - return writeJSON(w, http.StatusOK, containerJSON) -} - -func (s *Server) getExecByID(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing 
parameter 'id'") - } - - eConfig, err := s.daemon.ContainerExecInspect(vars["id"]) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, eConfig) -} - -func (s *Server) getImagesByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - imageInspect, err := s.daemon.Repositories().Lookup(vars["name"]) - if err != nil { - return err - } - - return writeJSON(w, http.StatusOK, imageInspect) -} - -func (s *Server) postBuild(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfigs = map[string]cliconfig.AuthConfig{} - authConfigsEncoded = r.Header.Get("X-Registry-Config") - buildConfig = builder.NewBuildConfig() - ) - - if authConfigsEncoded != "" { - authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) - if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting - // to be empty. - } - } - - w.Header().Set("Content-Type", "application/json") - - if boolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { - buildConfig.Remove = true - } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { - buildConfig.Remove = true - } else { - buildConfig.Remove = boolValue(r, "rm") - } - if boolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { - buildConfig.Pull = true - } - - output := ioutils.NewWriteFlusher(w) - buildConfig.Stdout = output - buildConfig.Context = r.Body - - buildConfig.RemoteURL = r.FormValue("remote") - buildConfig.DockerfileName = r.FormValue("dockerfile") - buildConfig.RepoName = r.FormValue("t") - buildConfig.SuppressOutput = boolValue(r, "q") - buildConfig.NoCache = boolValue(r, "nocache") - buildConfig.ForceRemove = boolValue(r, "forcerm") - buildConfig.AuthConfigs = authConfigs - buildConfig.MemorySwap = int64ValueOrZero(r, "memswap") - buildConfig.Memory = int64ValueOrZero(r, "memory") - buildConfig.CPUShares = int64ValueOrZero(r, "cpushares") - buildConfig.CPUPeriod = int64ValueOrZero(r, "cpuperiod") - buildConfig.CPUQuota = int64ValueOrZero(r, "cpuquota") - buildConfig.CPUSetCpus = r.FormValue("cpusetcpus") - buildConfig.CPUSetMems = r.FormValue("cpusetmems") - buildConfig.CgroupParent = r.FormValue("cgroupparent") - - var buildUlimits = []*ulimit.Ulimit{} - ulimitsJSON := r.FormValue("ulimits") - if ulimitsJSON != "" { - if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { - return err - } - buildConfig.Ulimits = buildUlimits - } - - // Job cancellation. Note: not all job types support this. - if closeNotifier, ok := w.(http.CloseNotifier); ok { - finished := make(chan struct{}) - defer close(finished) - go func() { - select { - case <-finished: - case <-closeNotifier.CloseNotify(): - logrus.Infof("Client disconnected, cancelling job: build") - buildConfig.Cancel() - } - }() - } - - if err := builder.Build(s.daemon, buildConfig); err != nil { - // Do not write the error in the http output if it's still empty. - // This prevents from writing a 200(OK) when there is an interal error. - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - w.Write(sf.FormatError(err)) - } - return nil -} - -// postContainersCopy is deprecated in favor of getContainersArchivePath. 
-func (s *Server) postContainersCopy(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - if err := checkForJSON(r); err != nil { - return err - } - - cfg := types.CopyConfig{} - if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { - return err - } - - if cfg.Resource == "" { - return fmt.Errorf("Path cannot be empty") - } - - data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource) - if err != nil { - if strings.Contains(strings.ToLower(err.Error()), "no such id") { - w.WriteHeader(http.StatusNotFound) - return nil - } - if os.IsNotExist(err) { - return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) - } - return err - } - defer data.Close() - - w.Header().Set("Content-Type", "application/x-tar") - if _, err := io.Copy(w, data); err != nil { - return err - } - - return nil -} - -// // Encode the stat to JSON, base64 encode, and place in a header. -func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { - statJSON, err := json.Marshal(stat) - if err != nil { - return err - } - - header.Set( - "X-Docker-Container-Path-Stat", - base64.StdEncoding.EncodeToString(statJSON), - ) - - return nil -} - -func (s *Server) headContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := archiveFormValues(r, vars) - if err != nil { - return err - } - - stat, err := s.daemon.ContainerStatPath(v.name, v.path) - if err != nil { - return err - } - - return setContainerPathStatHeader(stat, w.Header()) -} - -func (s *Server) getContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := archiveFormValues(r, vars) - if err != nil { - return err - } - - tarArchive, stat, err := s.daemon.ContainerArchivePath(v.name, v.path) - if err != nil { - return err - } - defer tarArchive.Close() - - if err := setContainerPathStatHeader(stat, w.Header()); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - _, err = io.Copy(w, tarArchive) - - return err -} - -func (s *Server) putContainersArchive(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := archiveFormValues(r, vars) - if err != nil { - return err - } - - noOverwriteDirNonDir := boolValue(r, "noOverwriteDirNonDir") - return s.daemon.ContainerExtractToDir(v.name, v.path, noOverwriteDirNonDir, r.Body) -} - -func (s *Server) postContainerExecCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if err := checkForJSON(r); err != nil { - return err - } - name := vars["name"] - - execConfig := &runconfig.ExecConfig{} - if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { - return err - } - execConfig.Container = name - - if len(execConfig.Cmd) == 0 { - return fmt.Errorf("No exec command specified") - } - - // Register an instance of Exec in container. - id, err := s.daemon.ContainerExecCreate(execConfig) - if err != nil { - logrus.Errorf("Error setting up exec command in container %s: %s", name, err) - return err - } - - return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ - ID: id, - }) -} - -// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
-func (s *Server) postContainerExecStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - execName = vars["name"] - stdin io.ReadCloser - stdout io.Writer - stderr io.Writer - ) - - execStartCheck := &types.ExecStartCheck{} - if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { - return err - } - - if !execStartCheck.Detach { - // Setting up the streaming http interface. - inStream, outStream, err := hijackServer(w) - if err != nil { - return err - } - defer closeStreams(inStream, outStream) - - var errStream io.Writer - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - if !execStartCheck.Tty { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - - stdin = inStream - stdout = outStream - stderr = errStream - } - // Now run the user process in container. - - if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { - logrus.Errorf("Error starting exec command in container %s: %s", execName, err) - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *Server) postContainerExecResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.daemon.ContainerExecResize(vars["name"], height, width) -} - -func (s *Server) optionsHandler(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) w.Header().Add("Access-Control-Allow-Origin", corsHeaders) w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") - w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") -} - -func (s *Server) ping(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - _, err := w.Write([]byte{'O', 'K'}) - return err + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") } func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) { @@ -1578,150 +139,80 @@ func (s *Server) initTCPSocket(addr string) (l net.Listener, err error) { return } -func makeHTTPHandler(logging bool, localMethod string, localRoute string, handlerFunc HTTPAPIFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc { +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - // log the request - logrus.Debugf("Calling %s %s", localMethod, localRoute) + // log the handler call + logrus.Debugf("Calling %s %s", r.Method, r.URL.Path) - if logging { - logrus.Infof("%s %s", r.Method, r.RequestURI) + // Define the 
context that we'll pass around to share info
+		// like the docker-request-id.
+		//
+		// The 'context' will be used for global data that should
+		// apply to all requests. Data that is specific to the
+		// immediate function being called should still be passed
+		// as 'args' on the function call.
+		ctx := context.Background()
+		handlerFunc := s.handleWithGlobalMiddlewares(handler)
+
+		vars := mux.Vars(r)
+		if vars == nil {
+			vars = make(map[string]string)
 		}
 
-		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
-			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
-
-			// v1.20 onwards includes the GOOS of the client after the version
-			// such as Docker/1.7.0 (linux)
-			if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") {
-				userAgent[1] = strings.Split(userAgent[1], " ")[0]
-			}
-
-			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
-				logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
-			}
-		}
-		version := version.Version(mux.Vars(r)["version"])
-		if version == "" {
-			version = api.Version
-		}
-		if corsHeaders != "" {
-			writeCorsHeaders(w, r, corsHeaders)
-		}
-
-		if version.GreaterThan(api.Version) {
-			http.Error(w, fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", version, api.Version).Error(), http.StatusBadRequest)
-			return
-		}
-		if version.LessThan(api.MinVersion) {
-			http.Error(w, fmt.Errorf("client is too old, minimum supported API version is %s, please upgrade your client to a newer version", api.MinVersion).Error(), http.StatusBadRequest)
-			return
-		}
-
-		w.Header().Set("Server", "Docker/"+dockerversion.VERSION+" ("+runtime.GOOS+")")
-
-		if err := handlerFunc(version, w, r, mux.Vars(r)); err != nil {
-			logrus.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
-			httpError(w, err)
+		if err := handlerFunc(ctx, w, r, vars); err != nil {
+			logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.URL.Path, utils.GetErrorMessage(err))
+			httputils.WriteError(w, err)
 		}
 	}
 }
 
+// InitRouters initializes a list of routers for the server and
+// installs the resulting mux as the Handler for each server.
+func (s *Server) InitRouters(d *daemon.Daemon) {
+	s.addRouter(local.NewRouter(d))
+	s.addRouter(network.NewRouter(d))
+	s.addRouter(volume.NewRouter(d))
+
+	for _, srv := range s.servers {
+		srv.srv.Handler = s.CreateMux()
+	}
+}
+
+// addRouter adds a new router to the server.
+func (s *Server) addRouter(r router.Router) {
+	s.routers = append(s.routers, r)
+}
+
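Because CreateMux (below) registers every route twice, once under versionMatcher and once bare, both versioned and unversioned requests hit the same handler. A standalone gorilla/mux sketch of that pattern, assuming a hypothetical /_ping path and a fmt import; the "version" var is simply empty on the bare path:

	m := mux.NewRouter()
	h := func(w http.ResponseWriter, r *http.Request) {
		// mux.Vars(r)["version"] is "1.21" for /v1.21/_ping, "" for /_ping
		fmt.Fprintf(w, "version=%q", mux.Vars(r)["version"])
	}
	m.Path("/v{version:[0-9.]+}" + "/_ping").Methods("GET").HandlerFunc(h)
	m.Path("/_ping").Methods("GET").HandlerFunc(h)

+// CreateMux initializes the main router the server uses.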
// we keep enableCors just for legacy usage, need to be removed in the future -func createRouter(s *Server) *mux.Router { - r := mux.NewRouter() +func (s *Server) CreateMux() *mux.Router { + m := mux.NewRouter() if os.Getenv("DEBUG") != "" { - profilerSetup(r, "/debug/") - } - m := map[string]map[string]HTTPAPIFunc{ - "HEAD": { - "/containers/{name:.*}/archive": s.headContainersArchive, - }, - "GET": { - "/_ping": s.ping, - "/events": s.getEvents, - "/info": s.getInfo, - "/version": s.getVersion, - "/images/json": s.getImagesJSON, - "/images/search": s.getImagesSearch, - "/images/get": s.getImagesGet, - "/images/{name:.*}/get": s.getImagesGet, - "/images/{name:.*}/history": s.getImagesHistory, - "/images/{name:.*}/json": s.getImagesByName, - "/containers/ps": s.getContainersJSON, - "/containers/json": s.getContainersJSON, - "/containers/{name:.*}/export": s.getContainersExport, - "/containers/{name:.*}/changes": s.getContainersChanges, - "/containers/{name:.*}/json": s.getContainersByName, - "/containers/{name:.*}/top": s.getContainersTop, - "/containers/{name:.*}/logs": s.getContainersLogs, - "/containers/{name:.*}/stats": s.getContainersStats, - "/containers/{name:.*}/attach/ws": s.wsContainersAttach, - "/exec/{id:.*}/json": s.getExecByID, - "/containers/{name:.*}/archive": s.getContainersArchive, - }, - "POST": { - "/auth": s.postAuth, - "/commit": s.postCommit, - "/build": s.postBuild, - "/images/create": s.postImagesCreate, - "/images/load": s.postImagesLoad, - "/images/{name:.*}/push": s.postImagesPush, - "/images/{name:.*}/tag": s.postImagesTag, - "/containers/create": s.postContainersCreate, - "/containers/{name:.*}/kill": s.postContainersKill, - "/containers/{name:.*}/pause": s.postContainersPause, - "/containers/{name:.*}/unpause": s.postContainersUnpause, - "/containers/{name:.*}/restart": s.postContainersRestart, - "/containers/{name:.*}/start": s.postContainersStart, - "/containers/{name:.*}/stop": s.postContainersStop, - "/containers/{name:.*}/wait": s.postContainersWait, - "/containers/{name:.*}/resize": s.postContainersResize, - "/containers/{name:.*}/attach": s.postContainersAttach, - "/containers/{name:.*}/copy": s.postContainersCopy, - "/containers/{name:.*}/exec": s.postContainerExecCreate, - "/exec/{name:.*}/start": s.postContainerExecStart, - "/exec/{name:.*}/resize": s.postContainerExecResize, - "/containers/{name:.*}/rename": s.postContainerRename, - }, - "PUT": { - "/containers/{name:.*}/archive": s.putContainersArchive, - }, - "DELETE": { - "/containers/{name:.*}": s.deleteContainers, - "/images/{name:.*}": s.deleteImages, - }, - "OPTIONS": { - "": s.optionsHandler, - }, + profilerSetup(m, "/debug/") } - // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" - // otherwise, all head values will be passed to HTTP handler - corsHeaders := s.cfg.CorsHeaders - if corsHeaders == "" && s.cfg.EnableCors { - corsHeaders = "*" - } + logrus.Debugf("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) - for method, routes := range m { - for route, fct := range routes { - logrus.Debugf("Registering %s, %s", method, route) - // NOTE: scope issue, make sure the variables are local and won't be changed - localRoute := route - localFct := fct - localMethod := method - - // build the handler function - f := makeHTTPHandler(s.cfg.Logging, localMethod, localRoute, localFct, corsHeaders, version.Version(s.cfg.Version)) - - // add the new route - if localRoute == "" 
{ - r.Methods(localMethod).HandlerFunc(f) - } else { - r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) - r.Path(localRoute).Methods(localMethod).HandlerFunc(f) - } + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) } } - return r + return m +} + +// AcceptConnections allows clients to connect to the API server. +// Referenced Daemon is notified about this server, and waits for the +// daemon acknowledgement before the incoming connections are accepted. +func (s *Server) AcceptConnections() { + // close the lock so the listeners start accepting connections + select { + case <-s.start: + default: + close(s.start) + } } diff --git a/vendor/github.com/docker/docker/api/server/server_experimental.go b/vendor/github.com/docker/docker/api/server/server_experimental.go deleted file mode 100644 index 06f55013..00000000 --- a/vendor/github.com/docker/docker/api/server/server_experimental.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build experimental - -package server - -func (s *Server) registerSubRouter() { - httpHandler := s.daemon.NetworkApiRouter() - - subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter() - subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) - subrouter = s.router.PathPrefix("/networks").Subrouter() - subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) - - subrouter = s.router.PathPrefix("/v{version:[0-9.]+}/services").Subrouter() - subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) - subrouter = s.router.PathPrefix("/services").Subrouter() - subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler) -} diff --git a/vendor/github.com/docker/docker/api/server/server_stub.go b/vendor/github.com/docker/docker/api/server/server_stub.go deleted file mode 100644 index 160c2922..00000000 --- a/vendor/github.com/docker/docker/api/server/server_stub.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !experimental - -package server - -func (s *Server) registerSubRouter() { -} diff --git a/vendor/github.com/docker/docker/api/server/server_test.go b/vendor/github.com/docker/docker/api/server/server_test.go new file mode 100644 index 00000000..f3256c31 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/server_test.go @@ -0,0 +1,34 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/docker/api/server/httputils" + + "golang.org/x/net/context" +) + +func TestMiddlewares(t *testing.T) { + cfg := &Config{} + srv := &Server{ + cfg: cfg, + } + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + handlerFunc := srv.handleWithGlobalMiddlewares(localHandler) + if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/api/server/server_unix.go b/vendor/github.com/docker/docker/api/server/server_unix.go index 84df45bc..c3d4def8 100644 --- a/vendor/github.com/docker/docker/api/server/server_unix.go +++ b/vendor/github.com/docker/docker/api/server/server_unix.go @@ -9,36 +9,25 @@ import ( "strconv" 
"github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon" "github.com/docker/docker/pkg/sockets" - "github.com/docker/docker/pkg/systemd" - "github.com/docker/docker/pkg/version" - "github.com/docker/docker/runconfig" "github.com/docker/libnetwork/portallocator" + + systemdActivation "github.com/coreos/go-systemd/activation" ) -const ( - // See http://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 - linuxMinCPUShares = 2 - linuxMaxCPUShares = 262144 -) - -// newServer sets up the required serverClosers and does protocol specific checking. -func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { +// newServer sets up the required HTTPServers and does protocol specific checking. +// newServer does not set any muxers, you should set it later to Handler field +func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { var ( err error ls []net.Listener ) switch proto { case "fd": - ls, err = systemd.ListenFD(addr) + ls, err = listenFD(addr) if err != nil { return nil, err } - // We don't want to start serving on these sockets until the - // daemon is initialized and installed. Otherwise required handlers - // won't be ready. - <-s.start case "tcp": l, err := s.initTCPSocket(addr) if err != nil { @@ -54,12 +43,11 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { default: return nil, fmt.Errorf("Invalid protocol format: %q", proto) } - var res []serverCloser + var res []*HTTPServer for _, l := range ls { res = append(res, &HTTPServer{ &http.Server{ - Addr: addr, - Handler: s.router, + Addr: addr, }, l, }) @@ -67,22 +55,6 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { return res, nil } -// AcceptConnections allows clients to connect to the API server. -// Referenced Daemon is notified about this server, and waits for the -// daemon acknowledgement before the incoming connections are accepted. -func (s *Server) AcceptConnections(d *daemon.Daemon) { - // Tell the init daemon we are accepting requests - s.daemon = d - s.registerSubRouter() - go systemd.SdNotify("READY=1") - // close the lock so the listeners start accepting connections - select { - case <-s.start: - default: - close(s.start) - } -} - func allocateDaemonPort(addr string) error { host, port, err := net.SplitHostPort(addr) if err != nil { @@ -110,27 +82,42 @@ func allocateDaemonPort(addr string) error { return nil } -func adjustCPUShares(version version.Version, hostConfig *runconfig.HostConfig) { - if version.LessThan("1.19") { - if hostConfig != nil && hostConfig.CPUShares > 0 { - // Handle unsupported CpuShares - if hostConfig.CPUShares < linuxMinCPUShares { - logrus.Warnf("Changing requested CpuShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) - hostConfig.CPUShares = linuxMinCPUShares - } else if hostConfig.CPUShares > linuxMaxCPUShares { - logrus.Warnf("Changing requested CpuShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) - hostConfig.CPUShares = linuxMaxCPUShares - } +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. 
+func listenFD(addr string) ([]net.Listener, error) { + // socket activation + listeners, err := systemdActivation.Listeners(false) + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("No sockets found") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd address, should be number: %v", err) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("Too few socket activated files passed in") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file at fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + logrus.Errorf("Failed to close systemd activated file at fd %d: %v", fdOffset+3, err) } } -} - -// getContainersByNameDownlevel performs processing for pre 1.20 APIs. This -// is only relevant on non-Windows daemons. -func getContainersByNameDownlevel(w http.ResponseWriter, s *Server, namevar string) error { - containerJSONRaw, err := s.daemon.ContainerInspectPre120(namevar) - if err != nil { - return err - } - return writeJSON(w, http.StatusOK, containerJSONRaw) + return []net.Listener{listeners[fdOffset]}, nil } diff --git a/vendor/github.com/docker/docker/api/server/server_windows.go b/vendor/github.com/docker/docker/api/server/server_windows.go index 7c80cf0e..826dd2e0 100644 --- a/vendor/github.com/docker/docker/api/server/server_windows.go +++ b/vendor/github.com/docker/docker/api/server/server_windows.go @@ -6,14 +6,10 @@ import ( "errors" "net" "net/http" - - "github.com/docker/docker/daemon" - "github.com/docker/docker/pkg/version" - "github.com/docker/docker/runconfig" ) // NewServer sets up the required Server and does protocol specific checking. -func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { +func (s *Server) newServer(proto, addr string) ([]*HTTPServer, error) { var ( ls []net.Listener ) @@ -29,12 +25,11 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { return nil, errors.New("Invalid protocol format. Windows only supports tcp.") } - var res []serverCloser + var res []*HTTPServer for _, l := range ls { res = append(res, &HTTPServer{ &http.Server{ - Addr: addr, - Handler: s.router, + Addr: addr, }, l, }) @@ -43,27 +38,6 @@ func (s *Server) newServer(proto, addr string) ([]serverCloser, error) { } -// AcceptConnections allows router to start listening for the incoming requests. -func (s *Server) AcceptConnections(d *daemon.Daemon) { - s.daemon = d - s.registerSubRouter() - // close the lock so the listeners start accepting connections - select { - case <-s.start: - default: - close(s.start) - } -} - func allocateDaemonPort(addr string) error { return nil } - -func adjustCPUShares(version version.Version, hostConfig *runconfig.HostConfig) { -} - -// getContainersByNameDownlevel performs processing for pre 1.20 APIs. This -// is only relevant on non-Windows daemons. 
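The `fdNum - 3` arithmetic above follows the systemd convention that activated sockets start at file descriptor 3 (after stdin/stdout/stderr), so an address of "4" selects the second activated socket. A hedged sketch of the same selection logic against coreos/go-systemd; the `Listeners(false)` call mirrors the vendored usage (false leaves the LISTEN_* environment variables in place):

```go
package main

import (
	"fmt"
	"net"
	"strconv"

	systemdActivation "github.com/coreos/go-systemd/activation"
)

// pickActivatedListener mirrors listenFD above: "" or "*" means all
// activated sockets, otherwise addr is an fd number (3 = first socket).
func pickActivatedListener(addr string) ([]net.Listener, error) {
	listeners, err := systemdActivation.Listeners(false)
	if err != nil {
		return nil, err
	}
	if addr == "" || addr == "*" {
		return listeners, nil
	}
	fdNum, err := strconv.Atoi(addr)
	if err != nil {
		return nil, fmt.Errorf("not an fd number: %q", addr)
	}
	offset := fdNum - 3 // fd 3 is the first systemd-activated socket
	if offset < 0 || offset >= len(listeners) {
		return nil, fmt.Errorf("fd %d not among %d activated sockets", fdNum, len(listeners))
	}
	return []net.Listener{listeners[offset]}, nil
}

func main() {
	// Run under something like `systemd-socket-activate -l 8080 ./prog`
	// for the listener slice to be non-empty (illustrative).
	ls, err := pickActivatedListener("3")
	fmt.Println(ls, err)
}
```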
-func getContainersByNameDownlevel(w http.ResponseWriter, s *Server, namevar string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go index 507830ce..ecc144f6 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -1,9 +1,10 @@ -// This package is used for API stability in the types and response to the +// Package types is used for API stability in the types and response to the // consumers of the API stats endpoint. package types import "time" +// ThrottlingData stores CPU throttling stats of one running container type ThrottlingData struct { // Number of periods with throttling active Periods uint64 `json:"periods"` @@ -13,8 +14,8 @@ type ThrottlingData struct { ThrottledTime uint64 `json:"throttled_time"` } -// All CPU stats are aggregated since container inception. -type CpuUsage struct { +// CPUUsage stores all CPU stats aggregated since container inception. +type CPUUsage struct { // Total CPU time consumed. // Units: nanoseconds. TotalUsage uint64 `json:"total_usage"` @@ -29,12 +30,14 @@ type CpuUsage struct { UsageInUsermode uint64 `json:"usage_in_usermode"` } -type CpuStats struct { - CpuUsage CpuUsage `json:"cpu_usage"` +// CPUStats aggregates and wraps all CPU-related info of a container +type CPUStats struct { + CPUUsage CPUUsage `json:"cpu_usage"` SystemUsage uint64 `json:"system_cpu_usage"` ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } +// MemoryStats aggregates all memory stats since container inception type MemoryStats struct { // current res_counter usage for memory Usage uint64 `json:"usage"` @@ -48,6 +51,7 @@ type MemoryStats struct { Limit uint64 `json:"limit"` } +// BlkioStatEntry stores a single blkio stat entry // TODO Windows: This can be factored out type BlkioStatEntry struct { Major uint64 `json:"major"` @@ -56,6 +60,7 @@ type BlkioStatEntry struct { Value uint64 `json:"value"` } +// BlkioStats stores all IO service stats for data read and write // TODO Windows: This can be factored out type BlkioStats struct { // number of bytes transferred to and from the block device @@ -69,8 +74,9 @@ type BlkioStats struct { SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` } +// NetworkStats aggregates the network stats of one container // TODO Windows: This will require refactoring -type Network struct { +type NetworkStats struct { RxBytes uint64 `json:"rx_bytes"` RxPackets uint64 `json:"rx_packets"` RxErrors uint64 `json:"rx_errors"` @@ -81,11 +87,19 @@ type Network struct { TxDropped uint64 `json:"tx_dropped"` } +// Stats aggregates all types of stats of one container type Stats struct { Read time.Time `json:"read"` - Network Network `json:"network,omitempty"` - PreCpuStats CpuStats `json:"precpu_stats,omitempty"` - CpuStats CpuStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty"` MemoryStats MemoryStats `json:"memory_stats,omitempty"` BlkioStats BlkioStats `json:"blkio_stats,omitempty"` } + +// StatsJSON extends Stats with the per-interface Networks map +type StatsJSON struct { + Stats + + // Networks is only populated for API versions >= 1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +}
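With these renames the wire keys stay unchanged (the json tags pin them), while per-interface network counters move out of Stats into the new StatsJSON.Networks map for API versions 1.21 and later. A self-contained decoding sketch using trimmed stand-ins shaped like the types above (field subset only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-ins for the vendored types above.
type CPUUsage struct {
	TotalUsage uint64 `json:"total_usage"`
}

type CPUStats struct {
	CPUUsage    CPUUsage `json:"cpu_usage"`
	SystemUsage uint64   `json:"system_cpu_usage"`
}

type NetworkStats struct {
	RxBytes uint64 `json:"rx_bytes"`
	TxBytes uint64 `json:"tx_bytes"`
}

type Stats struct {
	CPUStats CPUStats `json:"cpu_stats,omitempty"`
}

// StatsJSON embeds Stats and adds the per-interface map introduced
// for API version >= 1.21; the embedded fields decode flat.
type StatsJSON struct {
	Stats
	Networks map[string]NetworkStats `json:"networks,omitempty"`
}

func main() {
	payload := []byte(`{
		"cpu_stats": {"cpu_usage": {"total_usage": 100}, "system_cpu_usage": 500},
		"networks": {"eth0": {"rx_bytes": 42, "tx_bytes": 7}}
	}`)

	var s StatsJSON
	if err := json.Unmarshal(payload, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.CPUStats.CPUUsage.TotalUsage, s.Networks["eth0"].RxBytes)
}
```

diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index c99e5a9a..a6cec44a 100644 ---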
a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -5,7 +5,9 @@ import ( "time" "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/version" + "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" ) @@ -19,35 +21,41 @@ type ContainerCreateResponse struct { Warnings []string `json:"Warnings"` } -// POST /containers/{name:.*}/exec +// ContainerExecCreateResponse contains response of Remote API: +// POST "/containers/{name:.*}/exec" type ContainerExecCreateResponse struct { // ID is the exec ID. ID string `json:"Id"` } -// POST /auth +// AuthResponse contains response of Remote API: +// POST "/auth" type AuthResponse struct { // Status is the authentication status Status string `json:"Status"` } +// ContainerWaitResponse contains response of Remote API: // POST "/containers/"+containerID+"/wait" type ContainerWaitResponse struct { // StatusCode is the status code of the wait job StatusCode int `json:"StatusCode"` } +// ContainerCommitResponse contains response of Remote API: // POST "/commit?container="+containerID type ContainerCommitResponse struct { ID string `json:"Id"` } +// ContainerChange contains response of Remote API: // GET "/containers/{name:.*}/changes" type ContainerChange struct { Kind int Path string } +// ImageHistory contains response of Remote API: // GET "/images/{name:.*}/history" type ImageHistory struct { ID string `json:"Id"` @@ -58,32 +66,39 @@ type ImageHistory struct { Comment string } +// ImageDelete contains response of Remote API: // DELETE "/images/{name:.*}" type ImageDelete struct { Untagged string `json:",omitempty"` Deleted string `json:",omitempty"` } +// Image contains response of Remote API: // GET "/images/json" type Image struct { ID string `json:"Id"` - ParentId string + ParentID string `json:"ParentId"` RepoTags []string RepoDigests []string - Created int - Size int - VirtualSize int + Created int64 + Size int64 + VirtualSize int64 Labels map[string]string } +// GraphDriverData returns Image's graph driver config info +// when calling inspect command type GraphDriverData struct { Name string Data map[string]string } +// ImageInspect contains response of Remote API: // GET "/images/{name:.*}/json" type ImageInspect struct { - Id string + ID string `json:"Id"` + RepoTags []string + RepoDigests []string Parent string Comment string Created string @@ -99,7 +114,8 @@ type ImageInspect struct { GraphDriver GraphDriverData } -// GET "/containers/json" +// Port stores open ports info of container +// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} type Port struct { IP string `json:",omitempty"` PrivatePort int @@ -107,15 +123,18 @@ type Port struct { Type string } +// Container contains response of Remote API: +// GET "/containers/json" type Container struct { ID string `json:"Id"` Names []string Image string + ImageID string Command string - Created int + Created int64 Ports []Port - SizeRw int `json:",omitempty"` - SizeRootFs int `json:",omitempty"` + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` Labels map[string]string Status string HostConfig struct { @@ -123,32 +142,35 @@ type Container struct { } } +// CopyConfig contains request body of Remote API: // POST "/containers/"+containerID+"/copy" type CopyConfig struct { Resource string } // ContainerPathStat is used to encode the header from -// GET /containers/{name:.*}/archive -// "name" is the file or directory name. 
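A pattern worth noting throughout this hunk: the Go field names adopt canonical initialisms (ID, ParentID) while json tags preserve the legacy wire keys, and size fields widen from int to int64 so values past 2^31-1 bytes cannot overflow on 32-bit platforms. A minimal stand-in demonstrating both:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in for the vendored Image type: the Go field follows the
// initialism convention (ParentID), the tag keeps the legacy wire key
// ("ParentId"), and sizes are int64 so a >2 GiB image fits.
type Image struct {
	ID          string `json:"Id"`
	ParentID    string `json:"ParentId"`
	Created     int64
	Size        int64
	VirtualSize int64
}

func main() {
	b, _ := json.Marshal(Image{ID: "abc", ParentID: "def", Size: 3 << 30})
	fmt.Println(string(b))
	// {"Id":"abc","ParentId":"def","Created":0,"Size":3221225472,"VirtualSize":0}
}
```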
-// "path" is the absolute path to the resource in the container. +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. type ContainerPathStat struct { - Name string `json:"name"` - Path string `json:"path"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` } +// ContainerProcessList contains response of Remote API: // GET "/containers/{name:.*}/top" type ContainerProcessList struct { Processes [][]string Titles []string } +// Version contains response of Remote API: +// GET "/version" type Version struct { Version string - ApiVersion version.Version + APIVersion version.Version `json:"ApiVersion"` GitCommit string GoVersion string Os string @@ -158,6 +180,7 @@ type Version struct { BuildTime string `json:",omitempty"` } +// Info contains response of Remote API: // GET "/info" type Info struct { ID string @@ -167,11 +190,11 @@ type Info struct { DriverStatus [][2]string MemoryLimit bool SwapLimit bool - CpuCfsPeriod bool - CpuCfsQuota bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` IPv4Forwarding bool BridgeNfIptables bool - BridgeNfIp6tables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` Debug bool NFd int OomKillDisable bool @@ -183,21 +206,24 @@ type Info struct { KernelVersion string OperatingSystem string IndexServerAddress string - RegistryConfig interface{} + RegistryConfig *registry.ServiceConfig InitSha1 string InitPath string NCPU int MemTotal int64 DockerRootDir string - HttpProxy string - HttpsProxy string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` NoProxy string Name string Labels []string ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string } -// This struct is a temp struct used by execStart +// ExecStartCheck is a temp struct used by execStart // Config fields is part of ExecConfig in runconfig package type ExecStartCheck struct { // ExecStart will first check if it's detached @@ -206,7 +232,10 @@ type ExecStartCheck struct { Tty bool } +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command type ContainerState struct { + Status string Running bool Paused bool Restarting bool @@ -219,15 +248,15 @@ type ContainerState struct { FinishedAt string } +// ContainerJSONBase contains response of Remote API: // GET "/containers/{name:.*}/json" type ContainerJSONBase struct { - Id string + ID string `json:"Id"` Created string Path string Args []string State *ContainerState Image string - NetworkSettings *network.Settings ResolvConfPath string HostnamePath string HostsPath string @@ -235,38 +264,56 @@ type ContainerJSONBase struct { Name string RestartCount int Driver string - ExecDriver string MountLabel string ProcessLabel string AppArmorProfile string ExecIDs []string HostConfig *runconfig.HostConfig GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` } +// ContainerJSON is newly used struct along with MountPoint type ContainerJSON struct { *ContainerJSONBase - Mounts []MountPoint - Config *runconfig.Config + Mounts []MountPoint + Config *runconfig.Config + NetworkSettings *NetworkSettings } -// backcompatibility struct along with ContainerConfig. Note this is not -// used by the Windows daemon. 
-type ContainerJSONPre120 struct { - *ContainerJSONBase - Volumes map[string]string - VolumesRW map[string]bool - Config *ContainerConfig +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings } -type ContainerConfig struct { - *runconfig.Config +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} - // backward compatibility, they now live in HostConfig - Memory int64 - MemorySwap int64 - CpuShares int64 - Cpuset string +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string + Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + IPAddress string + IPPrefixLen int + IPv6Gateway string + MacAddress string } // MountPoint represents a mount point configuration inside the container. @@ -278,3 +325,68 @@ type MountPoint struct { Mode string RW bool } + +// Volume represents the configuration of a volume for the remote API +type Volume struct { + Name string // Name is the name of the volume + Driver string // Driver is the Driver name used to create the volume + Mountpoint string // Mountpoint is the location on disk of the volume +} + +// VolumesListResponse contains the response for the remote API: +// GET "/volumes" +type VolumesListResponse struct { + Volumes []*Volume // Volumes is the list of volumes being returned +} + +// VolumeCreateRequest contains the response for the remote API: +// POST "/volumes/create" +type VolumeCreateRequest struct { + Name string // Name is the requested name of the volume + Driver string // Driver is the name of the driver that should be used to create the volume + DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. 
+} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string + ID string `json:"Id"` + Scope string + Driver string + IPAM network.IPAM + Containers map[string]EndpointResource + Options map[string]string +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + Name string + CheckDuplicate bool + Driver string + IPAM network.IPAM + Options map[string]string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string +} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md new file mode 100644 index 00000000..76c516e6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -0,0 +1,14 @@ +## Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`. + +### Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you should create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go new file mode 100644 index 00000000..a66aa9d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/runconfig" +)
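Given the convention in the README above, a daemon handler would pick the frozen package by comparing the request's API version; pkg/version (vendored elsewhere in this patch) provides LessThan for that. The dispatch below is an illustrative sketch, not the vendored daemon code:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/version" // vendored path from this patch
)

// pickResponseShape sketches the README convention: v1p19 serves API
// versions <= 1.19, v1p20 serves exactly 1.20, and anything newer gets
// the live types.ContainerJSON.
func pickResponseShape(apiVersion version.Version) string {
	switch {
	case apiVersion.LessThan("1.20"):
		return "v1p19.ContainerJSON"
	case apiVersion.LessThan("1.21"):
		return "v1p20.ContainerJSON"
	default:
		return "types.ContainerJSON"
	}
}

func main() {
	for _, v := range []version.Version{"1.19", "1.20", "1.22"} {
		fmt.Printf("API %s -> %s\n", v, pickResponseShape(v))
	}
}
```

+// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon.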
+type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *runconfig.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go new file mode 100644 index 00000000..0facbb66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. +package v1p20 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/runconfig" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *runconfig.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for API prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go index d00bc716..bdd75871 100644 --- a/vendor/github.com/docker/docker/cliconfig/config.go +++ b/vendor/github.com/docker/docker/cliconfig/config.go @@ -4,17 +4,17 @@ import ( "encoding/base64" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/system" ) const ( - // ConfigFile is the name of config file + // ConfigFileName is the name of config file ConfigFileName = "config.json" oldConfigfile = ".dockercfg" @@ -70,6 +70,88 @@ func NewConfigFile(fn string) *ConfigFile { } } +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return fmt.Errorf("The Auth config file is empty") + } + authConfig := AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1]) + if err != nil { + return err + } + origEmail := strings.Split(arr[1], " = ") + if len(origEmail) != 2 { + return fmt.Errorf("Invalid Auth config file") + 
} + authConfig.Email = origEmail[1] + authConfig.ServerAddress = defaultIndexserver + configFile.AuthConfigs[defaultIndexserver] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = DecodeAuth(ac.Auth) + if err != nil { + return err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return nil +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) { + configFile := ConfigFile{ + AuthConfigs: make(map[string]AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*ConfigFile, error) { + configFile := ConfigFile{ + AuthConfigs: make(map[string]AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + // Load reads the configuration files in the given directory, and sets up // the auth config information and return values. 
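The refactor above splits parsing from file handling: LoadFromReader/LegacyLoadFromReader accept any io.Reader and SaveToWriter any io.Writer, so configs round-trip without touching disk (the base64 auth `am9lam9lOmhlbGxv` decodes to joejoe:hello, as the tests further down rely on). A sketch against the vendored package:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/docker/docker/cliconfig" // vendored path from this patch
)

func main() {
	// New-style config, as Load() now parses it via LoadFromReader.
	js := `{"auths": {"https://index.docker.io/v1/": {"auth": "am9lam9lOmhlbGxv", "email": "user@example.com"}}}`
	cfg, err := cliconfig.LoadFromReader(strings.NewReader(js))
	if err != nil {
		panic(err)
	}
	ac := cfg.AuthConfigs["https://index.docker.io/v1/"]
	fmt.Println(ac.Username, ac.Password) // joejoe hello (base64-decoded)

	// Old-style ~/.dockercfg content goes through LegacyLoadFromReader.
	legacy := `{"https://index.docker.io/v1/": {"auth": "am9lam9lOmhlbGxv", "email": "user@example.com"}}`
	if _, err := cliconfig.LegacyLoadFromReader(strings.NewReader(legacy)); err != nil {
		panic(err)
	}

	// SaveToWriter targets any io.Writer; Save() is now just
	// MkdirAll + OpenFile + SaveToWriter.
	var buf bytes.Buffer
	if err := cfg.SaveToWriter(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // re-encoded under the "auths" key
}
```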
// FIXME: use the internal golang config parser @@ -90,22 +172,8 @@ func Load(configDir string) (*ConfigFile, error) { return &configFile, err } defer file.Close() - - if err := json.NewDecoder(file).Decode(&configFile); err != nil { - return &configFile, err - } - - for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = DecodeAuth(ac.Auth) - if err != nil { - return &configFile, err - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - - return &configFile, nil + err = configFile.LoadFromReader(file) + return &configFile, err } else if !os.IsNotExist(err) { // if file is there but we can't stat it for any reason other // than it doesn't exist then stop @@ -117,49 +185,18 @@ func Load(configDir string) (*ConfigFile, error) { if _, err := os.Stat(confFile); err != nil { return &configFile, nil //missing file is not an error } - - b, err := ioutil.ReadFile(confFile) + file, err := os.Open(confFile) if err != nil { return &configFile, err } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return &configFile, fmt.Errorf("The Auth config file is empty") - } - authConfig := AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1]) - if err != nil { - return &configFile, err - } - origEmail := strings.Split(arr[1], " = ") - if len(origEmail) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Email = origEmail[1] - authConfig.ServerAddress = defaultIndexserver - configFile.AuthConfigs[defaultIndexserver] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth) - if err != nil { - return &configFile, err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return &configFile, nil + defer file.Close() + err = configFile.LegacyLoadFromReader(file) + return &configFile, err } -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { // Encode sensitive data into a new/temp struct tmpAuthConfigs := make(map[string]AuthConfig, len(configFile.AuthConfigs)) for k, authConfig := range configFile.AuthConfigs { @@ -180,16 +217,25 @@ func (configFile *ConfigFile) Save() error { if err != nil { return err } + _, err = writer.Write(data) + return err +} - if err := system.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { - return err +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + if configFile.Filename() == "" { + return fmt.Errorf("Can't save config with empty filename") } - if err := ioutil.WriteFile(configFile.filename, data, 0600); err != nil { + if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { return err } - - return nil + f, err := os.OpenFile(configFile.filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + return configFile.SaveToWriter(f) } // Filename returns the name of the configuration file diff --git 
a/vendor/github.com/docker/docker/cliconfig/config_test.go b/vendor/github.com/docker/docker/cliconfig/config_test.go index 25fb58a4..dcf368c5 100644 --- a/vendor/github.com/docker/docker/cliconfig/config_test.go +++ b/vendor/github.com/docker/docker/cliconfig/config_test.go @@ -4,15 +4,41 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" "strings" "testing" "github.com/docker/docker/pkg/homedir" ) +func TestEmptyConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + SetConfigDir(tmpHome) + + config, err := Load("") + if err != nil { + t.Fatalf("Failed loading on empty config dir: %q", err) + } + + expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) + if config.Filename() != expectedConfigFilename { + t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename()) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + func TestMissingFile(t *testing.T) { - tmpHome, _ := ioutil.TempDir("", "config-test") + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) config, err := Load(tmpHome) if err != nil { @@ -20,19 +46,15 @@ func TestMissingFile(t *testing.T) { } // Now save it and make sure it shows up in new form - err = config.Save() - if err != nil { - t.Fatalf("Failed to save: %q", err) - } - - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) { - t.Fatalf("Should have save in new form: %s", string(buf)) - } + saveConfigAndValidateNewFormat(t, config, tmpHome) } func TestSaveFileToDirs(t *testing.T) { - tmpHome, _ := ioutil.TempDir("", "config-test") + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) tmpHome += "/.docker" @@ -42,32 +64,38 @@ func TestSaveFileToDirs(t *testing.T) { } // Now save it and make sure it shows up in new form - err = config.Save() - if err != nil { - t.Fatalf("Failed to save: %q", err) - } - - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) { - t.Fatalf("Should have save in new form: %s", string(buf)) - } + saveConfigAndValidateNewFormat(t, config, tmpHome) } func TestEmptyFile(t *testing.T) { - tmpHome, _ := ioutil.TempDir("", "config-test") - fn := filepath.Join(tmpHome, ConfigFileName) - ioutil.WriteFile(fn, []byte(""), 0600) + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) - _, err := Load(tmpHome) + fn := filepath.Join(tmpHome, ConfigFileName) + if err := ioutil.WriteFile(fn, []byte(""), 0600); err != nil { + t.Fatal(err) + } + + _, err = Load(tmpHome) if err == nil { t.Fatalf("Was supposed to fail") } } func TestEmptyJson(t *testing.T) { - tmpHome, _ := ioutil.TempDir("", "config-test") + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + fn := filepath.Join(tmpHome, ConfigFileName) - ioutil.WriteFile(fn, []byte("{}"), 0600) + if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { + t.Fatal(err) + } config, err := Load(tmpHome) if err != nil { @@ -75,23 +103,119 @@ func TestEmptyJson(t *testing.T) { } // Now save it and make sure it shows up in new form - err = config.Save() - if err != nil { - t.Fatalf("Failed to save: %q", err) + 
saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestOldInvalidsAuth(t *testing.T) { + invalids := map[string]string{ + `username = test`: "The Auth config file is empty", + `username +password +email`: "Invalid Auth config file", + `username = test +email`: "Invalid auth configuration file", + `username = am9lam9lOmhlbGxv +email`: "Invalid Auth config file", } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) { - t.Fatalf("Should have save in new form: %s", string(buf)) + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + for content, expectedError := range invalids { + fn := filepath.Join(tmpHome, oldConfigfile) + if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err == nil || err.Error() != expectedError { + t.Fatalf("Should have failed, got: %q, %q", config, err) + } + + } +} + +func TestOldValidAuth(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `username = am9lam9lOmhlbGxv +email = user@example.com` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatal(err) + } + + // defaultIndexserver is https://index.docker.io/v1/ + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + if !strings.Contains(configStr, "user@example.com") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +func TestOldJsonInvalid(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err == nil || err.Error() != "Invalid auth configuration file" { + t.Fatalf("Expected an error got : %v, %v", config, err) } } func TestOldJson(t *testing.T) { - if runtime.GOOS == "windows" { - return + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) } - - tmpHome, _ := ioutil.TempDir("", "config-test") defer os.RemoveAll(tmpHome) homeKey := homedir.Key() @@ -102,7 +226,9 @@ func TestOldJson(t *testing.T) { fn := filepath.Join(tmpHome, oldConfigfile) js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - ioutil.WriteFile(fn, []byte(js), 0600) + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } config, err := 
Load(tmpHome) if err != nil { @@ -115,23 +241,25 @@ func TestOldJson(t *testing.T) { } // Now save it and make sure it shows up in new form - err = config.Save() - if err != nil { - t.Fatalf("Failed to save: %q", err) - } + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) || - !strings.Contains(string(buf), "user@example.com") { - t.Fatalf("Should have save in new form: %s", string(buf)) + if !strings.Contains(configStr, "user@example.com") { + t.Fatalf("Should have save in new form: %s", configStr) } } func TestNewJson(t *testing.T) { - tmpHome, _ := ioutil.TempDir("", "config-test") + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + fn := filepath.Join(tmpHome, ConfigFileName) js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` - ioutil.WriteFile(fn, []byte(js), 0600) + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } config, err := Load(tmpHome) if err != nil { @@ -144,26 +272,28 @@ func TestNewJson(t *testing.T) { } // Now save it and make sure it shows up in new form - err = config.Save() - if err != nil { - t.Fatalf("Failed to save: %q", err) - } + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"auths":`) || - !strings.Contains(string(buf), "user@example.com") { - t.Fatalf("Should have save in new form: %s", string(buf)) + if !strings.Contains(configStr, "user@example.com") { + t.Fatalf("Should have save in new form: %s", configStr) } } func TestJsonWithPsFormat(t *testing.T) { - tmpHome, _ := ioutil.TempDir("", "config-test") + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + fn := filepath.Join(tmpHome, ConfigFileName) js := `{ "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" }` - ioutil.WriteFile(fn, []byte(js), 0600) + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } config, err := Load(tmpHome) if err != nil { @@ -175,14 +305,154 @@ func TestJsonWithPsFormat(t *testing.T) { } // Now save it and make sure it shows up in new form - err = config.Save() + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"psFormat":`) || + !strings.Contains(configStr, "{{.ID}}") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +// Save it and make sure it shows up in new form +func saveConfigAndValidateNewFormat(t *testing.T, config *ConfigFile, homeFolder string) string { + err := config.Save() if err != nil { t.Fatalf("Failed to save: %q", err) } + buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } + return string(buf) +} + +func TestConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + if ConfigDir() == tmpHome { + t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) + } + + // Update configDir + SetConfigDir(tmpHome) + + if 
ConfigDir() != tmpHome { + t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) + } +} + +func TestConfigFile(t *testing.T) { + configFilename := "configFilename" + configFile := NewConfigFile(configFilename) + + if configFile.Filename() != configFilename { + t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename()) + } +} + +func TestJsonReaderNoFile(t *testing.T) { + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` + + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + +} + +func TestOldJsonReaderNoFile(t *testing.T) { + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + + config, err := LegacyLoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } +} + +func TestJsonWithPsFormatNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + +} + +func TestJsonSaveWithNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + err = config.SaveToWriter(f) + if err != nil { + t.Fatalf("Failed saving to file: %q", err) + } buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if !strings.Contains(string(buf), `"psFormat":`) || - !strings.Contains(string(buf), "{{.ID}}") { + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } + +} +func TestLegacyJsonSaveWithNoFile(t *testing.T) { + + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + config, err := LegacyLoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. 
File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + err = config.SaveToWriter(f) + if err != nil { + t.Fatalf("Failed saving to file: %q", err) + } + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { t.Fatalf("Should have save in new form: %s", string(buf)) } } diff --git a/vendor/github.com/docker/docker/daemon/README.md b/vendor/github.com/docker/docker/daemon/README.md index 64bfcb55..1778983f 100644 --- a/vendor/github.com/docker/docker/daemon/README.md +++ b/vendor/github.com/docker/docker/daemon/README.md @@ -3,7 +3,6 @@ This directory contains code pertaining to running containers and storing images Code pertaining to running containers: - execdriver - - networkdriver Code pertaining to storing images: diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go index 272112ce..f4a82478 100644 --- a/vendor/github.com/docker/docker/daemon/archive.go +++ b/vendor/github.com/docker/docker/daemon/archive.go @@ -5,6 +5,7 @@ import ( "io" "os" "path/filepath" + "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/archive" @@ -29,7 +30,7 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err res = res[1:] } - return container.Copy(res) + return daemon.containerCopy(container, res) } // ContainerStatPath stats the filesystem resource at the specified path in the @@ -40,7 +41,7 @@ func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.C return nil, err } - return container.StatPath(path) + return daemon.containerStatPath(container, path) } // ContainerArchivePath creates an archive of the filesystem resource at the @@ -52,7 +53,7 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io return nil, nil, err } - return container.ArchivePath(path) + return daemon.containerArchivePath(container, path) } // ContainerExtractToDir extracts the given archive to the specified location @@ -67,65 +68,98 @@ func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNon return err } - return container.ExtractToDir(path, noOverwriteDirNonDir, content) + return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) } -// StatPath stats the filesystem resource at the specified path in this -// container. Returns stat info about the resource. -func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) { - container.Lock() - defer container.Unlock() - - if err = container.Mount(); err != nil { - return nil, err - } - defer container.Unmount() - - err = container.mountVolumes() - defer container.UnmountVolumes(true) - if err != nil { - return nil, err - } - +// resolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// a error if the path points to outside the container's rootfs. 
+func (container *Container) resolvePath(path string) (resolvedPath, absPath string, err error) { // Consider the given path as an absolute path in the container. - absPath := path - if !filepath.IsAbs(absPath) { - absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(os.PathSeparator), path), path) - } + absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) - resolvedPath, err := container.GetResourcePath(absPath) + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. + dirPath, basePath := filepath.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) if err != nil { - return nil, err + return "", "", err } - // A trailing "." or separator has important meaning. For example, if - // `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")` - // will stat the link itself, while `os.Lstat("foo/")` will stat the link - // target. If the basename of the path is ".", it means to archive the - // contents of the directory with "." as the first path component rather - // than the name of the directory. This would cause extraction of the - // archive to *not* make another directory, but instead use the current - // directory. - resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath) + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + return resolvedPath, absPath, nil +} + +// statPath is the unexported version of StatPath. Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) statPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { lstat, err := os.Lstat(resolvedPath) if err != nil { return nil, err } + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.basefs, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + return &types.ContainerPathStat{ - Name: lstat.Name(), - Path: absPath, - Size: lstat.Size(), - Mode: lstat.Mode(), - Mtime: lstat.ModTime(), + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, }, nil } -// ArchivePath creates an archive of the filesystem resource at the specified +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. 
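resolvePath deliberately resolves only the directory part within the container scope and then reattaches the base name, so a symlink as the last path element is stat'ed rather than followed; statPath then reports where such a symlink points relative to the container rootfs, never as a host path. A stand-in for that rebasing step (illustrative, not the vendored function):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// relativeLinkTarget mirrors the statPath logic above: a symlink's
// fully resolved host path is rebased onto the container's rootfs so
// the API never leaks host paths.
func relativeLinkTarget(basefs, resolvedHostPath string) (string, error) {
	rel, err := filepath.Rel(basefs, resolvedHostPath)
	if err != nil {
		return "", err
	}
	// Make it absolute again, but absolute *inside* the container.
	return filepath.Join(string(filepath.Separator), rel), nil
}

func main() {
	target, err := relativeLinkTarget(
		"/var/lib/docker/aufs/mnt/abc123",          // container.basefs (illustrative)
		"/var/lib/docker/aufs/mnt/abc123/etc/motd", // resolved symlink target
	)
	fmt.Println(target, err) // "/etc/motd" <nil>
}
```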
+func (daemon *Daemon) containerStatPath(container *Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.unmountVolumes(true) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.resolvePath(path) + if err != nil { + return nil, err + } + + return container.statPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified // path in this container. Returns a tar archive of the resource and stat info // about the resource. -func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { +func (daemon *Daemon) containerArchivePath(container *Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { container.Lock() defer func() { @@ -137,117 +171,95 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta } }() - if err = container.Mount(); err != nil { + if err = daemon.Mount(container); err != nil { return nil, nil, err } defer func() { if err != nil { // unmount any volumes - container.UnmountVolumes(true) + container.unmountVolumes(true) // unmount the container's rootfs - container.Unmount() + daemon.Unmount(container) } }() - if err = container.mountVolumes(); err != nil { + if err = daemon.mountVolumes(container); err != nil { return nil, nil, err } - // Consider the given path as an absolute path in the container. - absPath := path - if !filepath.IsAbs(absPath) { - absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(os.PathSeparator), path), path) - } - - resolvedPath, err := container.GetResourcePath(absPath) + resolvedPath, absPath, err := container.resolvePath(path) if err != nil { return nil, nil, err } - // A trailing "." or separator has important meaning. For example, if - // `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")` - // will stat the link itself, while `os.Lstat("foo/")` will stat the link - // target. If the basename of the path is ".", it means to archive the - // contents of the directory with "." as the first path component rather - // than the name of the directory. This would cause extraction of the - // archive to *not* make another directory, but instead use the current - // directory. - resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath) - - lstat, err := os.Lstat(resolvedPath) + stat, err = container.statPath(resolvedPath, absPath) if err != nil { return nil, nil, err } - stat = &types.ContainerPathStat{ - Name: lstat.Name(), - Path: absPath, - Size: lstat.Size(), - Mode: lstat.Mode(), - Mtime: lstat.ModTime(), - } - - data, err := archive.TarResource(resolvedPath) + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. 
+ data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) if err != nil { return nil, nil, err } content = ioutils.NewReadCloserWrapper(data, func() error { err := data.Close() - container.UnmountVolumes(true) - container.Unmount() + container.unmountVolumes(true) + daemon.Unmount(container) container.Unlock() return err }) - container.LogEvent("archive-path") + daemon.LogContainerEvent(container, "archive-path") return content, stat, nil } -// ExtractToDir extracts the given tar archive to the specified location in the +// containerExtractToDir extracts the given tar archive to the specified location in the // filesystem of this container. The given path must be of a directory in the // container. If it is not, the error will be ErrExtractPointNotDirectory. If // noOverwriteDirNonDir is true then it will be an error if unpacking the // given content would cause an existing directory to be replaced with a non- // directory and vice versa. -func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { +func (daemon *Daemon) containerExtractToDir(container *Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { container.Lock() defer container.Unlock() - if err = container.Mount(); err != nil { + if err = daemon.Mount(container); err != nil { return err } - defer container.Unmount() + defer daemon.Unmount(container) - err = container.mountVolumes() - defer container.UnmountVolumes(true) + err = daemon.mountVolumes(container) + defer container.unmountVolumes(true) if err != nil { return err } - // Consider the given path as an absolute path in the container. - absPath := path - if !filepath.IsAbs(absPath) { - absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(os.PathSeparator), path), path) - } + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.resolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. resolvedPath, err := container.GetResourcePath(absPath) if err != nil { return err } - // A trailing "." or separator has important meaning. For example, if - // `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")` - // will stat the link itself, while `os.Lstat("foo/")` will stat the link - // target. If the basename of the path is ".", it means to archive the - // contents of the directory with "." as the first path component rather - // than the name of the directory. This would cause extraction of the - // archive to *not* make another directory, but instead use the current - // directory. 
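TarResourceRebase covers the case the comment above spells out: the client asked for /foo/bar/, the path resolved to a different on-disk name, and the archive entries must still be rooted at "bar". A hedged usage sketch against the vendored pkg/archive (signature as called above); the temp directory stands in for a resolved symlink target:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive" // vendored path from this patch
)

func main() {
	// A directory whose on-disk name differs from the name the client
	// asked for.
	dir, err := ioutil.TempDir("", "baz")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := ioutil.WriteFile(filepath.Join(dir, "file"), []byte("x"), 0600); err != nil {
		panic(err)
	}

	// Rebase the archive so entries are rooted at "bar", regardless of
	// what the resolved directory is actually called on disk.
	rc, err := archive.TarResourceRebase(dir, "bar")
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name) // entries rooted at "bar"
	}
}
```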
- resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath) - stat, err := os.Lstat(resolvedPath) if err != nil { return err @@ -257,22 +269,42 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, return ErrExtractPointNotDirectory } - baseRel, err := filepath.Rel(container.basefs, resolvedPath) - if err != nil { - return err - } - absPath = filepath.Join(string(os.PathSeparator), baseRel) - // Need to check if the path is in a volume. If it is, it cannot be in a // read-only volume. If it is not in a volume, the container cannot be // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.basefs) { + baseRel = resolvedPath[len(container.basefs):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.basefs, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + toVolume, err := checkIfPathIsInAVolume(container, absPath) if err != nil { return err } if !toVolume && container.hostConfig.ReadonlyRootfs { - return ErrContainerRootfsReadonly + return ErrRootFSReadOnly } options := &archive.TarOptions{ @@ -286,7 +318,72 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, return err } - container.LogEvent("extract-to-dir") + daemon.LogContainerEvent(container, "extract-to-dir") return nil } + +func (daemon *Daemon) containerCopy(container *Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.unmountVolumes(true) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.unmountVolumes(true) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} diff --git a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go index 100fc788..7588d76d 100644 --- a/vendor/github.com/docker/docker/daemon/archive_unix.go +++ b/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -8,7 +8,7 @@ package daemon func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) { var toVolume bool for _, mnt := range container.MountPoints { - if toVolume = mnt.hasResource(absPath); toVolume { + if toVolume = mnt.HasResource(absPath); toVolume { if mnt.RW { break } diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go index 79ffa8df..047828b1 100644 --- a/vendor/github.com/docker/docker/daemon/attach.go +++ b/vendor/github.com/docker/docker/daemon/attach.go @@ -2,10 +2,14 @@ package daemon import ( "io" + "time" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/stdcopy" ) +// ContainerAttachWithLogsConfig holds the streams to use when connecting to a container to view logs. type ContainerAttachWithLogsConfig struct { InStream io.ReadCloser OutStream io.Writer @@ -13,7 +17,13 @@ type ContainerAttachWithLogsConfig struct { Logs, Stream bool } -func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *ContainerAttachWithLogsConfig) error { +// ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig. +func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerAttachWithLogsConfig) error { + container, err := daemon.Get(prefixOrName) + if err != nil { + return err + } + var errStream io.Writer if !container.Config.Tty { @@ -36,15 +46,78 @@ func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *Container stderr = errStream } - return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream) + return daemon.attachWithLogs(container, stdin, stdout, stderr, c.Logs, c.Stream) } +// ContainerWsAttachWithLogsConfig attach with websockets, since all +// stream data is delegated to the websocket to handle there. 
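Both attach entry points now take a name or ID prefix rather than a resolved *Container, calling daemon.Get themselves. A hedged sketch of how a caller inside the daemon package might drive the non-websocket variant (the daemon value d, the container name "web-1", and the stream wiring are illustrative assumptions; the config fields come from the hunk above):

	cfg := &ContainerAttachWithLogsConfig{
		InStream:  ioutil.NopCloser(os.Stdin), // any io.ReadCloser
		OutStream: os.Stdout,
		Logs:      true, // replay stored log lines first
		Stream:    true, // then follow live output
	}
	// d is a *Daemon; "web-1" may be a container name or an ID prefix.
	if err := d.ContainerAttachWithLogs("web-1", cfg); err != nil {
		logrus.Errorf("attach failed: %v", err)
	}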
type ContainerWsAttachWithLogsConfig struct { InStream io.ReadCloser OutStream, ErrStream io.Writer Logs, Stream bool } -func (daemon *Daemon) ContainerWsAttachWithLogs(container *Container, c *ContainerWsAttachWithLogsConfig) error { - return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream) +// ContainerWsAttachWithLogs websocket connection +func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *ContainerWsAttachWithLogsConfig) error { + container, err := daemon.Get(prefixOrName) + if err != nil { + return err + } + return daemon.attachWithLogs(container, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream) +} + +func (daemon *Daemon) attachWithLogs(container *Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error { + if logs { + logDriver, err := daemon.getLogger(container) + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(container, "attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + <-container.Attach(stdinPipe, stdout, stderr) + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.WaitStop(-1 * time.Second) + } + } + return nil } diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go index 55b230b9..181ac5a9 100644 --- a/vendor/github.com/docker/docker/daemon/changes.go +++ b/vendor/github.com/docker/docker/daemon/changes.go @@ -9,5 +9,7 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { return nil, err } - return container.Changes() + container.Lock() + defer container.Unlock() + return daemon.changes(container) } diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go index 5921d77e..61808eab 100644 --- a/vendor/github.com/docker/docker/daemon/commit.go +++ b/vendor/github.com/docker/docker/daemon/commit.go @@ -1,28 +1,53 @@ package daemon import ( + "fmt" + "runtime" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/runconfig" ) +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. type ContainerCommitConfig struct { Pause bool Repo string Tag string Author string Comment string - Config *runconfig.Config + // merge container config into commit config before commit + MergeConfigs bool + Config *runconfig.Config } // Commit creates a new filesystem image from the current state of a container. 
-// The image can optionally be tagged into a repository -func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) { - if c.Pause && !container.IsPaused() { - container.Pause() - defer container.Unpause() +// The image can optionally be tagged into a repository. +func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (*image.Image, error) { + container, err := daemon.Get(name) + if err != nil { + return nil, err } - rwTar, err := container.ExportRw() + // It is not possible to commit a running container on Windows + if runtime.GOOS == "windows" && container.IsRunning() { + return nil, fmt.Errorf("Windows does not support commit of a running container") + } + + if c.Pause && !container.isPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + if c.MergeConfigs { + if err := runconfig.Merge(c.Config, container.Config); err != nil { + return nil, err + } + } + + rwTar, err := daemon.exportContainerRw(container) if err != nil { return nil, err } @@ -33,18 +58,7 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i }() // Create a new image from the container's base layers + a new layer from container changes - var ( - containerID, parentImageID string - containerConfig *runconfig.Config - ) - - if container != nil { - containerID = container.ID - parentImageID = container.ImageID - containerConfig = container.Config - } - - img, err := daemon.graph.Create(rwTar, containerID, parentImageID, c.Comment, c.Author, containerConfig, c.Config) + img, err := daemon.graph.Create(rwTar, container.ID, container.ImageID, c.Comment, c.Author, container.Config, c.Config) if err != nil { return nil, err } @@ -55,6 +69,19 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i return img, err } } - container.LogEvent("commit") + + daemon.LogContainerEvent(container, "commit") return img, nil } + +func (daemon *Daemon) exportContainerRw(container *Container) (archive.Archive, error) { + archive, err := daemon.diff(container) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + return err + }), + nil +} diff --git a/vendor/github.com/docker/docker/daemon/config.go b/vendor/github.com/docker/docker/daemon/config.go index 56addfd4..bc301a53 100644 --- a/vendor/github.com/docker/docker/daemon/config.go +++ b/vendor/github.com/docker/docker/daemon/config.go @@ -18,9 +18,9 @@ type CommonConfig struct { Bridge bridgeConfig // Bridge holds bridge network specific configuration. Context map[string][]string DisableBridge bool - Dns []string - DnsSearch []string - ExecDriver string + DNS []string + DNSOptions []string + DNSSearch []string ExecOptions []string ExecRoot string GraphDriver string @@ -29,10 +29,24 @@ type CommonConfig struct { LogConfig runconfig.LogConfig Mtu int Pidfile string + RemappedRoot string Root string TrustKeyPath string DefaultNetwork string - NetworkKVStore string + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. 
This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string } // InstallCommonFlags adds command-line options to the top-level flag parser for @@ -47,12 +61,15 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", usageFn("Root of the Docker execdriver")) cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) - cmd.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, defaultExec, usageFn("Exec driver to use")) cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) // FIXME: why the inconsistency between "hosts" and "sockets"? - cmd.Var(opts.NewListOptsRef(&config.Dns, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) - cmd.Var(opts.NewListOptsRef(&config.DnsSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) + cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) + cmd.Var(opts.NewListOptsRef(&config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use")) + cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) cmd.Var(opts.NewListOptsRef(&config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) cmd.Var(opts.NewMapOpts(config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options")) + cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise")) + cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store")) + cmd.Var(opts.NewMapOpts(config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) } diff --git a/vendor/github.com/docker/docker/daemon/config_experimental.go b/vendor/github.com/docker/docker/daemon/config_experimental.go index b3a1d3fb..f1c4bb92 100644 --- a/vendor/github.com/docker/docker/daemon/config_experimental.go +++ b/vendor/github.com/docker/docker/daemon/config_experimental.go @@ -2,9 +2,118 @@ package daemon -import flag "github.com/docker/docker/pkg/mflag" +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/pkg/idtools" + flag "github.com/docker/docker/pkg/mflag" + "github.com/opencontainers/runc/libcontainer/user" +) func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { - cmd.StringVar(&config.DefaultNetwork, []string{"-default-network"}, "", usageFn("Set default network")) - cmd.StringVar(&config.NetworkKVStore, []string{"-kv-store"}, "", usageFn("Set KV Store configuration")) + cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces")) +} + +const ( + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" +) + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from 
/etc/passwd
+// username:groupname - valid username; valid groupname from /etc/group
+// uid - 32-bit unsigned int valid Linux UID value
+// uid:gid - uid value; 32-bit unsigned int Linux GID value
+//
+// If no groupname is specified, and a username is specified, an attempt
+// will be made to look up a gid for that username as a groupname
+//
+// If names are used, they are verified to exist in passwd/group
+func parseRemappedRoot(usergrp string) (string, string, error) {
+
+	var (
+		userID, groupID     int
+		username, groupname string
+	)
+
+	idparts := strings.Split(usergrp, ":")
+	if len(idparts) > 2 {
+		return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
+	}
+
+	if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
+		// must be a uid; take it as valid
+		userID = int(uid)
+		luser, err := user.LookupUid(userID)
+		if err != nil {
+			return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
+		}
+		username = luser.Name
+		if len(idparts) == 1 {
+			// if the uid was numeric and no gid was specified, take the uid as the gid
+			groupID = userID
+			lgrp, err := user.LookupGid(groupID)
+			if err != nil {
+				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
+			}
+			groupname = lgrp.Name
+		}
+	} else {
+		lookupName := idparts[0]
+		// special case: if the user specified "default", they want Docker to create or
+		// use (after creation) the "dockremap" user/group for root remapping
+		if lookupName == defaultIDSpecifier {
+			lookupName = defaultRemappedID
+		}
+		luser, err := user.LookupUser(lookupName)
+		if err != nil && idparts[0] != defaultIDSpecifier {
+			// error if the name requested isn't the special "dockremap" ID
+			return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
+		} else if err != nil {
+			// special case-- if the username == "default", then we have been asked
+			// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
+			// ranges will be used for the user and group mappings in user namespaced containers
+			_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
+			if err == nil {
+				return defaultRemappedID, defaultRemappedID, nil
+			}
+			return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
+		}
+		userID = luser.Uid
+		username = luser.Name
+		if len(idparts) == 1 {
+			// we only have a string username, and no group specified; look up gid from username as group
+			group, err := user.LookupGroup(lookupName)
+			if err != nil {
+				return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
+			}
+			groupID = group.Gid
+			groupname = group.Name
+		}
+	}
+
+	if len(idparts) == 2 {
+		// groupname or gid is separately specified and must be resolved
+		// to an unsigned 32-bit gid
+		if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
+			// must be a gid, take it as valid
+			groupID = int(gid)
+			lgrp, err := user.LookupGid(groupID)
+			if err != nil {
+				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
+			}
+			groupname = lgrp.Name
+		} else {
+			// not a number; attempt a lookup
+			group, err := user.LookupGroup(idparts[1])
+			if err != nil {
+				return "", "", fmt.Errorf("Error during gid lookup for %q: %v", idparts[1], err)
+			}
+			groupID = group.Gid
+			groupname = idparts[1]
+		}
+	}
+	return username, groupname, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/config_unix.go b/vendor/github.com/docker/docker/daemon/config_unix.go
index 5fd4934b..8772aad9 100644
---
a/vendor/github.com/docker/docker/daemon/config_unix.go +++ b/vendor/github.com/docker/docker/daemon/config_unix.go @@ -17,8 +17,6 @@ var ( ) // Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker -d -e lxc` type Config struct { CommonConfig @@ -27,6 +25,7 @@ type Config struct { CorsHeaders string EnableCors bool EnableSelinuxSupport bool + RemappedRoot string SocketGroup string Ulimits map[string]*ulimit.Ulimit } @@ -70,10 +69,10 @@ func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) strin cmd.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) cmd.StringVar(&config.Bridge.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) cmd.StringVar(&config.Bridge.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) - cmd.Var(opts.NewIpOpt(&config.Bridge.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) - cmd.Var(opts.NewIpOpt(&config.Bridge.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) + cmd.Var(opts.NewIPOpt(&config.Bridge.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) + cmd.Var(opts.NewIPOpt(&config.Bridge.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) cmd.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) - cmd.Var(opts.NewIpOpt(&config.Bridge.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) + cmd.Var(opts.NewIPOpt(&config.Bridge.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) cmd.BoolVar(&config.Bridge.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) diff --git a/vendor/github.com/docker/docker/daemon/config_windows.go b/vendor/github.com/docker/docker/daemon/config_windows.go index dd7bb82a..bc0833aa 100644 --- a/vendor/github.com/docker/docker/daemon/config_windows.go +++ b/vendor/github.com/docker/docker/daemon/config_windows.go @@ -20,7 +20,7 @@ type bridgeConfig struct { // Config defines the configuration of a docker daemon. 
// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker -d -e windows` +// to the docker daemon when you launch it with say: `docker daemon -e windows` type Config struct { CommonConfig diff --git a/vendor/github.com/docker/docker/daemon/container.go b/vendor/github.com/docker/docker/daemon/container.go index ed03611d..fc64d0ee 100644 --- a/vendor/github.com/docker/docker/daemon/container.go +++ b/vendor/github.com/docker/docker/daemon/container.go @@ -5,10 +5,8 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" - "strings" "sync" "syscall" "time" @@ -20,76 +18,68 @@ import ( "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/daemon/network" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/broadcastwriter" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/docker/docker/volume" ) var ( - ErrNotATTY = errors.New("The PTY is not a file") - ErrNoTTY = errors.New("No PTY found") - ErrContainerStart = errors.New("The container failed to start. Unknown error") - ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") - ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only") + // ErrRootFSReadOnly is returned when a container + // rootfs is marked readonly. + ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") ) -type ErrContainerNotRunning struct { - id string -} - -func (e ErrContainerNotRunning) Error() string { - return fmt.Sprintf("Container %s is not running", e.id) -} - -type StreamConfig struct { - stdout *broadcastwriter.BroadcastWriter - stderr *broadcastwriter.BroadcastWriter +type streamConfig struct { + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered stdin io.ReadCloser stdinPipe io.WriteCloser } -// CommonContainer holds the settings for a container which are applicable -// across all platforms supported by the daemon. +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. type CommonContainer struct { - StreamConfig - - *State `json:"State"` // Needed for remote api version <= 1.11 - root string // Path to the "home" of the container, including metadata. - basefs string // Path to the graphdriver mountpoint - - ID string - Created time.Time - Path string - Args []string - Config *runconfig.Config - ImageID string `json:"Image"` - NetworkSettings *network.Settings - LogPath string - Name string - Driver string - ExecDriver string - MountLabel, ProcessLabel string - RestartCount int - HasBeenStartedBefore bool - hostConfig *runconfig.HostConfig - command *execdriver.Command - monitor *containerMonitor - execCommands *execStore - daemon *Daemon + streamConfig + // embed for Container to support states directly. + *State `json:"State"` // Needed for remote api version <= 1.11 + root string // Path to the "home" of the container, including metadata. 
+ basefs string // Path to the graphdriver mountpoint + ID string + Created time.Time + Path string + Args []string + Config *runconfig.Config + ImageID string `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + hostConfig *runconfig.HostConfig + command *execdriver.Command + monitor *containerMonitor + execCommands *execStore // logDriver for closing logDriver logger.Logger logCopier *logger.Copier } -func (container *Container) FromDisk() error { +func (container *Container) fromDisk() error { pth, err := container.jsonPath() if err != nil { return err @@ -104,8 +94,7 @@ func (container *Container) FromDisk() error { dec := json.NewDecoder(jsonSource) // Load container settings - // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it - if err := dec.Decode(container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { + if err := dec.Decode(container); err != nil { return err } @@ -116,24 +105,28 @@ func (container *Container) FromDisk() error { } func (container *Container) toDisk() error { - data, err := json.Marshal(container) - if err != nil { - return err - } - pth, err := container.jsonPath() if err != nil { return err } - if err := ioutil.WriteFile(pth, data, 0666); err != nil { + jsonSource, err := os.Create(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { return err } - return container.WriteHostConfig() + return container.writeHostConfig() } -func (container *Container) ToDisk() error { +func (container *Container) toDiskLocking() error { container.Lock() err := container.toDisk() container.Unlock() @@ -150,13 +143,11 @@ func (container *Container) readHostConfig() error { return err } - _, err = os.Stat(pth) - if os.IsNotExist(err) { - return nil - } - f, err := os.Open(pth) if err != nil { + if os.IsNotExist(err) { + return nil + } return err } defer f.Close() @@ -164,30 +155,22 @@ func (container *Container) readHostConfig() error { return json.NewDecoder(f).Decode(&container.hostConfig) } -func (container *Container) WriteHostConfig() error { - data, err := json.Marshal(container.hostConfig) - if err != nil { - return err - } - +func (container *Container) writeHostConfig() error { pth, err := container.hostConfigPath() if err != nil { return err } - return ioutil.WriteFile(pth, data, 0666) + f, err := os.Create(pth) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.hostConfig) } -func (container *Container) LogEvent(action string) { - d := container.daemon - d.EventsService.Log( - action, - container.ID, - container.Config.Image, - ) -} - -// Evaluates `path` in the scope of the container's basefs, with proper path +// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path // sanitisation. Symlinks are all scoped to the basefs of the container, as // though the container's basefs was `/`. 
// @@ -220,95 +203,14 @@ func (container *Container) GetResourcePath(path string) (string, error) { // if no component of the returned path changes (such as a component // symlinking to a different path) between using this method and using the // path. See symlink.FollowSymlinkInScope for more details. -func (container *Container) GetRootResourcePath(path string) (string, error) { +func (container *Container) getRootResourcePath(path string) (string, error) { // IMPORTANT - These are paths on the OS where the daemon is running, hence // any filepath operations must be done in an OS agnostic way. cleanPath := filepath.Join(string(os.PathSeparator), path) return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root) } -func (container *Container) Start() (err error) { - container.Lock() - defer container.Unlock() - - if container.Running { - return nil - } - - if container.removalInProgress || container.Dead { - return fmt.Errorf("Container is marked for removal and cannot be started.") - } - - // if we encounter an error during start we need to ensure that any other - // setup has been cleaned up properly - defer func() { - if err != nil { - container.setError(err) - // if no one else has set it, make sure we don't leave it at zero - if container.ExitCode == 0 { - container.ExitCode = 128 - } - container.toDisk() - container.cleanup() - container.LogEvent("die") - } - }() - - if err := container.Mount(); err != nil { - return err - } - - // No-op if non-Windows. Once the container filesystem is mounted, - // prepare the layer to boot using the Windows driver. - if err := container.PrepareStorage(); err != nil { - return err - } - - if err := container.initializeNetworking(); err != nil { - return err - } - linkedEnv, err := container.setupLinkedContainers() - if err != nil { - return err - } - if err := container.setupWorkingDirectory(); err != nil { - return err - } - env := container.createDaemonEnvironment(linkedEnv) - if err := populateCommand(container, env); err != nil { - return err - } - - mounts, err := container.setupMounts() - if err != nil { - return err - } - - container.command.Mounts = mounts - return container.waitForStart() -} - -func (container *Container) Run() error { - if err := container.Start(); err != nil { - return err - } - container.HasBeenStartedBefore = true - container.WaitStop(-1 * time.Second) - return nil -} - -func (container *Container) Output() (output []byte, err error) { - pipe := container.StdoutPipe() - defer pipe.Close() - if err := container.Start(); err != nil { - return nil, err - } - output, err = ioutil.ReadAll(pipe) - container.WaitStop(-1 * time.Second) - return output, err -} - -// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data +// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data // to the standard input of the container's active process. // Container.StdoutPipe and Container.StderrPipe each return a ReadCloser // which can be used to retrieve the standard output (and error) generated @@ -316,376 +218,64 @@ func (container *Container) Output() (output []byte, err error) { // copied and delivered to all StdoutPipe and StderrPipe consumers, using // a kind of "broadcaster". 
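A short sketch of that broadcaster fan-out (illustrative only; broadcaster.Unbuffered and its Add method are the vendored pkg/broadcaster API used by the hunk below):

	// Each StdoutPipe call attaches another writer to the same
	// broadcaster, so a single write to the container's stdout is
	// copied to every attached pipe.
	reader, writer := io.Pipe()
	streamConfig.stdout.Add(writer)
	go io.Copy(os.Stdout, reader) // one of possibly several consumers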
-func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { +func (streamConfig *streamConfig) StdinPipe() io.WriteCloser { return streamConfig.stdinPipe } -func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { +func (streamConfig *streamConfig) StdoutPipe() io.ReadCloser { reader, writer := io.Pipe() - streamConfig.stdout.AddWriter(writer) + streamConfig.stdout.Add(writer) return ioutils.NewBufReader(reader) } -func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { +func (streamConfig *streamConfig) StderrPipe() io.ReadCloser { reader, writer := io.Pipe() - streamConfig.stderr.AddWriter(writer) + streamConfig.stderr.Add(writer) return ioutils.NewBufReader(reader) } -func (container *Container) isNetworkAllocated() bool { - return container.NetworkSettings.IPAddress != "" -} - -// cleanup releases any network resources allocated to the container along with any rules -// around how containers are linked together. It also unmounts the container's root filesystem. -func (container *Container) cleanup() { - container.ReleaseNetwork() - - if err := container.CleanupStorage(); err != nil { - logrus.Errorf("%v: Failed to cleanup storage: %v", container.ID, err) - } - - if err := container.Unmount(); err != nil { - logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) - } - - for _, eConfig := range container.execCommands.s { - container.daemon.unregisterExecCommand(eConfig) - } - - container.UnmountVolumes(false) -} - -func (container *Container) KillSig(sig int) error { - logrus.Debugf("Sending %d to %s", sig, container.ID) - container.Lock() - defer container.Unlock() - - // We could unpause the container for them rather than returning this error - if container.Paused { - return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) - } - - if !container.Running { - return ErrContainerNotRunning{container.ID} - } - - // signal to the monitor that it should not restart the container - // after we send the kill signal +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { container.monitor.ExitOnNext() - - // if the container is currently restarting we do not need to send the signal - // to the process. Telling the monitor that it should exit on it's next event - // loop is enough - if container.Restarting { - return nil - } - - if err := container.daemon.Kill(container, sig); err != nil { - return err - } - container.LogEvent("kill") - return nil -} - -// Wrapper aroung KillSig() suppressing "no such process" error. 
-func (container *Container) killPossiblyDeadProcess(sig int) error { - err := container.KillSig(sig) - if err == syscall.ESRCH { - logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig) - return nil - } - return err -} - -func (container *Container) Pause() error { - container.Lock() - defer container.Unlock() - - // We cannot Pause the container which is not running - if !container.Running { - return ErrContainerNotRunning{container.ID} - } - - // We cannot Pause the container which is already paused - if container.Paused { - return fmt.Errorf("Container %s is already paused", container.ID) - } - - if err := container.daemon.execDriver.Pause(container.command); err != nil { - return err - } - container.Paused = true - container.LogEvent("pause") - return nil -} - -func (container *Container) Unpause() error { - container.Lock() - defer container.Unlock() - - // We cannot unpause the container which is not running - if !container.Running { - return ErrContainerNotRunning{container.ID} - } - - // We cannot unpause the container which is not paused - if !container.Paused { - return fmt.Errorf("Container %s is not paused", container.ID) - } - - if err := container.daemon.execDriver.Unpause(container.command); err != nil { - return err - } - container.Paused = false - container.LogEvent("unpause") - return nil -} - -func (container *Container) Kill() error { - if !container.IsRunning() { - return ErrContainerNotRunning{container.ID} - } - - // 1. Send SIGKILL - if err := container.killPossiblyDeadProcess(9); err != nil { - // While normally we might "return err" here we're not going to - // because if we can't stop the container by this point then - // its probably because its already stopped. Meaning, between - // the time of the IsRunning() call above and now it stopped. - // Also, since the err return will be exec driver specific we can't - // look for any particular (common) error that would indicate - // that the process is already dead vs something else going wrong. - // So, instead we'll give it up to 2 more seconds to complete and if - // by that time the container is still running, then the error - // we got is probably valid and so we return it to the caller. - - if container.IsRunning() { - container.WaitStop(2 * time.Second) - if container.IsRunning() { - return err - } - } - } - - // 2. Wait for the process to die, in last resort, try to kill the process directly - if err := killProcessDirectly(container); err != nil { - return err - } - - container.WaitStop(-1 * time.Second) - return nil -} - -func (container *Container) Stop(seconds int) error { - if !container.IsRunning() { - return nil - } - - // 1. Send a SIGTERM - if err := container.killPossiblyDeadProcess(15); err != nil { - logrus.Infof("Failed to send SIGTERM to the process, force killing") - if err := container.killPossiblyDeadProcess(9); err != nil { - return err - } - } - - // 2. Wait for the process to exit on its own - if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { - logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) - // 3. 
If it doesn't, then send SIGKILL - if err := container.Kill(); err != nil { - container.WaitStop(-1 * time.Second) - return err - } - } - - container.LogEvent("stop") - return nil -} - -func (container *Container) Restart(seconds int) error { - // Avoid unnecessarily unmounting and then directly mounting - // the container when the container stops and then starts - // again - if err := container.Mount(); err == nil { - defer container.Unmount() - } - - if err := container.Stop(seconds); err != nil { - return err - } - - if err := container.Start(); err != nil { - return err - } - - container.LogEvent("restart") - return nil } +// Resize changes the TTY of the process running inside the container +// to the given height and width. The container must be running. func (container *Container) Resize(h, w int) error { - if !container.IsRunning() { - return ErrContainerNotRunning{container.ID} - } if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil { return err } - container.LogEvent("resize") return nil } -func (container *Container) Export() (archive.Archive, error) { - if err := container.Mount(); err != nil { - return nil, err - } - - archive, err := archive.Tar(container.basefs, archive.Uncompressed) - if err != nil { - container.Unmount() - return nil, err - } - arch := ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }) - container.LogEvent("export") - return arch, err -} - -func (container *Container) Mount() error { - return container.daemon.Mount(container) -} - -func (container *Container) changes() ([]archive.Change, error) { - return container.daemon.Changes(container) -} - -func (container *Container) Changes() ([]archive.Change, error) { - container.Lock() - defer container.Unlock() - return container.changes() -} - -func (container *Container) GetImage() (*image.Image, error) { - if container.daemon == nil { - return nil, fmt.Errorf("Can't get image of unregistered container") - } - return container.daemon.graph.Get(container.ImageID) -} - -func (container *Container) Unmount() error { - return container.daemon.Unmount(container) -} - func (container *Container) hostConfigPath() (string, error) { - return container.GetRootResourcePath("hostconfig.json") + return container.getRootResourcePath("hostconfig.json") } func (container *Container) jsonPath() (string, error) { - return container.GetRootResourcePath("config.json") + return container.getRootResourcePath("config.json") } -// This method must be exported to be used from the lxc template // This directory is only usable when the container is running -func (container *Container) RootfsPath() string { +func (container *Container) rootfsPath() string { return container.basefs } func validateID(id string) error { if id == "" { - return fmt.Errorf("Invalid empty id") + return derr.ErrorCodeEmptyID } return nil } -func (container *Container) Copy(resource string) (rc io.ReadCloser, err error) { - container.Lock() - - defer func() { - if err != nil { - // Wait to unlock the container until the archive is fully read - // (see the ReadCloseWrapper func below) or if there is an error - // before that occurs. 
- container.Unlock() - } - }() - - if err := container.Mount(); err != nil { - return nil, err - } - - defer func() { - if err != nil { - // unmount any volumes - container.UnmountVolumes(true) - // unmount the container's rootfs - container.Unmount() - } - }() - - if err := container.mountVolumes(); err != nil { - return nil, err - } - - basePath, err := container.GetResourcePath(resource) - if err != nil { - return nil, err - } - stat, err := os.Stat(basePath) - if err != nil { - return nil, err - } - var filter []string - if !stat.IsDir() { - d, f := filepath.Split(basePath) - basePath = d - filter = []string{f} - } else { - filter = []string{filepath.Base(basePath)} - basePath = filepath.Dir(basePath) - } - archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ - Compression: archive.Uncompressed, - IncludeFiles: filter, - }) - if err != nil { - return nil, err - } - - if err := container.PrepareStorage(); err != nil { - container.Unmount() - return nil, err - } - - reader := ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.CleanupStorage() - container.UnmountVolumes(true) - container.Unmount() - container.Unlock() - return err - }) - container.LogEvent("copy") - return reader, nil -} - // Returns true if the container exposes a certain port -func (container *Container) Exposes(p nat.Port) bool { +func (container *Container) exposes(p nat.Port) bool { _, exists := container.Config.ExposedPorts[p] return exists } -func (container *Container) HostConfig() *runconfig.HostConfig { - return container.hostConfig -} - -func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) { - container.hostConfig = hostConfig -} - -func (container *Container) getLogConfig() runconfig.LogConfig { +func (container *Container) getLogConfig(defaultConfig runconfig.LogConfig) runconfig.LogConfig { cfg := container.hostConfig.LogConfig if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured if cfg.Type == "" { @@ -694,20 +284,14 @@ func (container *Container) getLogConfig() runconfig.LogConfig { return cfg } // Use daemon's default log config for containers - return container.daemon.defaultLogConfig + return defaultConfig } -func (container *Container) getLogger() (logger.Logger, error) { - if container.logDriver != nil && container.IsRunning() { - return container.logDriver, nil - } - cfg := container.getLogConfig() - if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { - return nil, err - } +// StartLogger starts a new logger driver for the container. 
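Callers are now expected to resolve the effective config first, typically via getLogConfig above, and pass it in explicitly, since the container no longer holds a daemon back-pointer. A hedged sketch of the intended call pattern (daemonDefault is an assumed name for the daemon's default runconfig.LogConfig):

	cfg := container.getLogConfig(daemonDefault)
	l, err := container.StartLogger(cfg)
	if err != nil {
		return fmt.Errorf("failed to initialize logging driver: %v", err)
	}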
+func (container *Container) StartLogger(cfg runconfig.LogConfig) (logger.Logger, error) { c, err := logger.GetLogDriver(cfg.Type) if err != nil { - return nil, fmt.Errorf("Failed to get logging factory: %v", err) + return nil, derr.ErrorCodeLoggingFactory.WithArgs(err) } ctx := logger.Context{ Config: cfg.Config, @@ -718,11 +302,13 @@ func (container *Container) getLogger() (logger.Logger, error) { ContainerImageID: container.ImageID, ContainerImageName: container.Config.Image, ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, } // Set logging file for "json-logger" if cfg.Type == jsonfilelog.Name { - ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + ctx.LogPath, err = container.getRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) if err != nil { return nil, err } @@ -730,45 +316,7 @@ func (container *Container) getLogger() (logger.Logger, error) { return c(ctx) } -func (container *Container) startLogging() error { - cfg := container.getLogConfig() - if cfg.Type == "none" { - return nil // do not start logging routines - } - - l, err := container.getLogger() - if err != nil { - return fmt.Errorf("Failed to initialize logging driver: %v", err) - } - - copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) - container.logCopier = copier - copier.Run() - container.logDriver = l - - // set LogPath field only for json-file logdriver - if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { - container.LogPath = jl.LogPath() - } - - return nil -} - -func (container *Container) waitForStart() error { - container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) - - // block until we either receive an error from the initial start of the container's - // process or until the process is running in the container - select { - case <-container.monitor.startSignal: - case err := <-promise.Go(container.monitor.Start): - return err - } - - return nil -} - -func (container *Container) GetProcessLabel() string { +func (container *Container) getProcessLabel() string { // even if we have a process label return "" if we are running // in privileged mode if container.hostConfig.Privileged { @@ -777,154 +325,24 @@ func (container *Container) GetProcessLabel() string { return container.ProcessLabel } -func (container *Container) GetMountLabel() string { +func (container *Container) getMountLabel() string { if container.hostConfig.Privileged { return "" } return container.MountLabel } -func (container *Container) Stats() (*execdriver.ResourceStats, error) { - return container.daemon.Stats(container) -} - -func (c *Container) LogDriverType() string { - c.Lock() - defer c.Unlock() - if c.hostConfig.LogConfig.Type == "" { - return c.daemon.defaultLogConfig.Type - } - return c.hostConfig.LogConfig.Type -} - -func (container *Container) GetExecIDs() []string { +func (container *Container) getExecIDs() []string { return container.execCommands.List() } -func (container *Container) Exec(execConfig *execConfig) error { - container.Lock() - defer container.Unlock() - - callback := func(processConfig *execdriver.ProcessConfig, pid int) { - if processConfig.Tty { - // The callback is called after the process Start() - // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave - // which we close here. 
- if c, ok := processConfig.Stdout.(io.Closer); ok { - c.Close() - } - } - close(execConfig.waitStart) - } - - // We use a callback here instead of a goroutine and an chan for - // syncronization purposes - cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) }) - - // Exec should not return until the process is actually running - select { - case <-execConfig.waitStart: - case err := <-cErr: - return err - } - - return nil +// Attach connects to the container's TTY, delegating to standard +// streams or websockets depending on the configuration. +func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { + return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr) } -func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error { - var ( - err error - exitCode int - ) - pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin) - exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback) - if err != nil { - logrus.Errorf("Error running command in existing container %s: %s", container.ID, err) - } - logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode) - if execConfig.OpenStdin { - if err := execConfig.StreamConfig.stdin.Close(); err != nil { - logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err) - } - } - if err := execConfig.StreamConfig.stdout.Clean(); err != nil { - logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err) - } - if err := execConfig.StreamConfig.stderr.Clean(); err != nil { - logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err) - } - if execConfig.ProcessConfig.Terminal != nil { - if err := execConfig.ProcessConfig.Terminal.Close(); err != nil { - logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err) - } - } - // remove the exec command from the container's store only and not the - // daemon's store so that the exec command can be inspected. 
- container.execCommands.Delete(execConfig.ID) - return err -} - -func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { - return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr) -} - -func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error { - if logs { - logDriver, err := c.getLogger() - if err != nil { - return err - } - cLog, ok := logDriver.(logger.LogReader) - if !ok { - return logger.ErrReadLogsNotSupported - } - logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) - - LogLoop: - for { - select { - case msg, ok := <-logs.Msg: - if !ok { - break LogLoop - } - if msg.Source == "stdout" && stdout != nil { - stdout.Write(msg.Line) - } - if msg.Source == "stderr" && stderr != nil { - stderr.Write(msg.Line) - } - case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) - break LogLoop - } - } - } - - c.LogEvent("attach") - - //stream - if stream { - var stdinPipe io.ReadCloser - if stdin != nil { - r, w := io.Pipe() - go func() { - defer w.Close() - defer logrus.Debugf("Closing buffered stdin pipe") - io.Copy(w, stdin) - }() - stdinPipe = r - } - <-c.Attach(stdinPipe, stdout, stderr) - // If we are in stdinonce mode, wait for the process to end - // otherwise, simply return - if c.Config.StdinOnce && !c.Config.Tty { - c.WaitStop(-1 * time.Second) - } - } - return nil -} - -func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { +func attach(streamConfig *streamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { var ( cStdout, cStderr io.ReadCloser cStdin io.WriteCloser @@ -1071,11 +489,12 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) func (container *Container) shouldRestart() bool { return container.hostConfig.RestartPolicy.Name == "always" || + (container.hostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) || (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) } -func (container *Container) mountVolumes() error { - mounts, err := container.setupMounts() +func (daemon *Daemon) mountVolumes(container *Container) error { + mounts, err := daemon.setupMounts(container) if err != nil { return err } @@ -1108,27 +527,83 @@ func (container *Container) mountVolumes() error { return nil } -func (container *Container) copyImagePathContent(v volume.Volume, destination string) error { - rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs) - if err != nil { - return err - } +func (container *Container) unmountVolumes(forceSyscall bool) error { + var ( + volumeMounts []volume.MountPoint + err error + ) - if _, err = ioutil.ReadDir(rootfs); err != nil { - if os.IsNotExist(err) { - return nil + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + return err } + + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume}) + } + + // Append any network mounts to the list (this is a no-op on Windows) + if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil { return err } - path, err := v.Mount() - if err != nil { - return err + for _, volumeMount := range volumeMounts { + if forceSyscall { + if err := 
system.Unmount(volumeMount.Destination); err != nil { + logrus.Warnf("%s unmountVolumes: Failed to force umount %v", container.ID, err) + } + } + + if volumeMount.Volume != nil { + if err := volumeMount.Volume.Unmount(); err != nil { + return err + } + } } - if err := copyExistingContents(rootfs, path); err != nil { - return err - } - - return v.Unmount() + return nil +} + +func (container *Container) addBindMountPoint(name, source, destination string, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: name, + Source: source, + Destination: destination, + RW: rw, + } +} + +func (container *Container) addLocalMountPoint(name, destination string, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: name, + Driver: volume.DefaultDriverName, + Destination: destination, + RW: rw, + } +} + +func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + } +} + +func (container *Container) isDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +func (container *Container) stopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) } diff --git a/vendor/github.com/docker/docker/daemon/container_unit_test.go b/vendor/github.com/docker/docker/daemon/container_unit_test.go index ab30a8e3..71d37cf4 100644 --- a/vendor/github.com/docker/docker/daemon/container_unit_test.go +++ b/vendor/github.com/docker/docker/daemon/container_unit_test.go @@ -1,6 +1,11 @@ package daemon -import "testing" +import ( + "testing" + + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/runconfig" +) func TestGetFullName(t *testing.T) { name, err := GetFullContainerName("testing") @@ -31,3 +36,31 @@ func TestValidContainerNames(t *testing.T) { } } } + +func TestContainerStopSignal(t *testing.T) { + c := &Container{ + CommonContainer: CommonContainer{ + Config: &runconfig.Config{}, + }, + } + + def, err := signal.ParseSignal(signal.DefaultStopSignal) + if err != nil { + t.Fatal(err) + } + + s := c.stopSignal() + if s != int(def) { + t.Fatalf("Expected %v, got %v", def, s) + } + + c = &Container{ + CommonContainer: CommonContainer{ + Config: &runconfig.Config{StopSignal: "SIGKILL"}, + }, + } + s = c.stopSignal() + if s != 9 { + t.Fatalf("Expected 9, got %v", s) + } +} diff --git a/vendor/github.com/docker/docker/daemon/container_unix.go b/vendor/github.com/docker/docker/daemon/container_unix.go index 8015acaf..c797059e 100644 --- a/vendor/github.com/docker/docker/daemon/container_unix.go +++ b/vendor/github.com/docker/docker/daemon/container_unix.go @@ -18,11 +18,12 @@ import ( "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/links" "github.com/docker/docker/daemon/network" - "github.com/docker/docker/pkg/archive" + derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/ulimit" 
"github.com/docker/docker/runconfig" @@ -37,8 +38,13 @@ import ( "github.com/opencontainers/runc/libcontainer/label" ) +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +// Container holds the fields specific to unixen implementations. See +// CommonContainer for standard fields common to all containers. type Container struct { CommonContainer @@ -47,17 +53,18 @@ type Container struct { AppArmorProfile string HostnamePath string HostsPath string - MountPoints map[string]*mountPoint + ShmPath string // TODO Windows - Factor this out (GH15862) + MqueuePath string // TODO Windows - Factor this out (GH15862) ResolvConfPath string - UpdateDns bool - Volumes map[string]string // Deprecated since 1.7, kept for backwards compatibility - VolumesRW map[string]bool // Deprecated since 1.7, kept for backwards compatibility + + Volumes map[string]string // Deprecated since 1.7, kept for backwards compatibility + VolumesRW map[string]bool // Deprecated since 1.7, kept for backwards compatibility } func killProcessDirectly(container *Container) error { if _, err := container.WaitStop(10 * time.Second); err != nil { // Ensure that we don't kill ourselves - if pid := container.GetPid(); pid != 0 { + if pid := container.GetPID(); pid != 0 { logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) if err := syscall.Kill(pid, 9); err != nil { if err != syscall.ESRCH { @@ -70,25 +77,32 @@ func killProcessDirectly(container *Container) error { return nil } -func (container *Container) setupLinkedContainers() ([]string, error) { - var ( - env []string - daemon = container.daemon - ) - children, err := daemon.Children(container.Name) +func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) { + var env []string + children, err := daemon.children(container.Name) if err != nil { return nil, err } + bridgeSettings := container.NetworkSettings.Networks["bridge"] + if bridgeSettings == nil { + return nil, nil + } + if len(children) > 0 { for linkAlias, child := range children { if !child.IsRunning() { - return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks["bridge"] + if childBridgeSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) } link := links.NewLink( - container.NetworkSettings.IPAddress, - child.NetworkSettings.IPAddress, + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, linkAlias, child.Config.Env, child.Config.ExposedPorts, @@ -165,19 +179,19 @@ func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs. 
return devs, nil } - return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) + return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err) } -func populateCommand(c *Container, env []string) error { +func (daemon *Daemon) populateCommand(c *Container, env []string) error { var en *execdriver.Network if !c.Config.NetworkDisabled { - en = &execdriver.Network{ - NamespacePath: c.NetworkSettings.SandboxKey, + en = &execdriver.Network{} + if !daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() { + en.NamespacePath = c.NetworkSettings.SandboxKey } - parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) - if parts[0] == "container" { - nc, err := c.getNetworkedContainer() + if c.hostConfig.NetworkMode.IsContainer() { + nc, err := daemon.getNetworkedContainer(c.ID, c.hostConfig.NetworkMode.ConnectedContainer()) if err != nil { return err } @@ -186,15 +200,37 @@ func populateCommand(c *Container, env []string) error { } ipc := &execdriver.Ipc{} + var err error + c.ShmPath, err = c.shmPath() + if err != nil { + return err + } + + c.MqueuePath, err = c.mqueuePath() + if err != nil { + return err + } if c.hostConfig.IpcMode.IsContainer() { - ic, err := c.getIpcContainer() + ic, err := daemon.getIpcContainer(c) if err != nil { return err } ipc.ContainerID = ic.ID + c.ShmPath = ic.ShmPath + c.MqueuePath = ic.MqueuePath } else { ipc.HostIpc = c.hostConfig.IpcMode.IsHost() + if ipc.HostIpc { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + if _, err := os.Stat("/dev/mqueue"); err != nil { + return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + c.MqueuePath = "/dev/mqueue" + } } pid := &execdriver.Pid{} @@ -219,12 +255,6 @@ func populateCommand(c *Container, env []string) error { autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices) - // TODO: this can be removed after lxc-conf is fully deprecated - lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig) - if err != nil { - return err - } - var rlimits []*ulimit.Rlimit ulimits := c.hostConfig.Ulimits @@ -233,7 +263,7 @@ func populateCommand(c *Container, env []string) error { for _, ul := range ulimits { ulIdx[ul.Name] = ul } - for name, ul := range c.daemon.config.Ulimits { + for name, ul := range daemon.configStore.Ulimits { if _, exists := ulIdx[name]; !exists { ulimits = append(ulimits, ul) } @@ -248,14 +278,18 @@ func populateCommand(c *Container, env []string) error { } resources := &execdriver.Resources{ - Memory: c.hostConfig.Memory, + CommonResources: execdriver.CommonResources{ + Memory: c.hostConfig.Memory, + MemoryReservation: c.hostConfig.MemoryReservation, + CPUShares: c.hostConfig.CPUShares, + BlkioWeight: c.hostConfig.BlkioWeight, + }, MemorySwap: c.hostConfig.MemorySwap, - CPUShares: c.hostConfig.CPUShares, + KernelMemory: c.hostConfig.KernelMemory, CpusetCpus: c.hostConfig.CpusetCpus, CpusetMems: c.hostConfig.CpusetMems, CPUPeriod: c.hostConfig.CPUPeriod, CPUQuota: c.hostConfig.CPUQuota, - BlkioWeight: c.hostConfig.BlkioWeight, Rlimits: rlimits, OomKillDisable: c.hostConfig.OomKillDisable, MemorySwappiness: -1, @@ -276,28 +310,40 @@ func populateCommand(c *Container, env []string) error { processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true} processConfig.Env = env + remappedRoot := &execdriver.User{} + rootUID, rootGID := 
daemon.GetRemappedUIDGID() + if rootUID != 0 { + remappedRoot.UID = rootUID + remappedRoot.GID = rootGID + } + uidMap, gidMap := daemon.GetUIDGIDMaps() + c.command = &execdriver.Command{ - ID: c.ID, - Rootfs: c.RootfsPath(), - ReadonlyRootfs: c.hostConfig.ReadonlyRootfs, - InitPath: "/.dockerinit", - WorkingDir: c.Config.WorkingDir, - Network: en, - Ipc: ipc, - Pid: pid, - UTS: uts, - Resources: resources, + CommonCommand: execdriver.CommonCommand{ + ID: c.ID, + InitPath: "/.dockerinit", + MountLabel: c.getMountLabel(), + Network: en, + ProcessConfig: processConfig, + ProcessLabel: c.getProcessLabel(), + Rootfs: c.rootfsPath(), + Resources: resources, + WorkingDir: c.Config.WorkingDir, + }, AllowedDevices: allowedDevices, + AppArmorProfile: c.AppArmorProfile, AutoCreatedDevices: autoCreatedDevices, CapAdd: c.hostConfig.CapAdd.Slice(), CapDrop: c.hostConfig.CapDrop.Slice(), - GroupAdd: c.hostConfig.GroupAdd, - ProcessConfig: processConfig, - ProcessLabel: c.GetProcessLabel(), - MountLabel: c.GetMountLabel(), - LxcConfig: lxcConfig, - AppArmorProfile: c.AppArmorProfile, CgroupParent: c.hostConfig.CgroupParent, + GIDMapping: gidMap, + GroupAdd: c.hostConfig.GroupAdd, + Ipc: ipc, + Pid: pid, + ReadonlyRootfs: c.hostConfig.ReadonlyRootfs, + RemappedRoot: remappedRoot, + UIDMapping: uidMap, + UTS: uts, } return nil @@ -322,24 +368,23 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi return append(devs, userDevices...) } -// GetSize, return real size, virtual size -func (container *Container) GetSize() (int64, int64) { +// getSize returns the real size & virtual size of the container. +func (daemon *Daemon) getSize(container *Container) (int64, int64) { var ( sizeRw, sizeRootfs int64 err error - driver = container.daemon.driver ) - if err := container.Mount(); err != nil { + if err := daemon.Mount(container); err != nil { logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) return sizeRw, sizeRootfs } - defer container.Unmount() + defer daemon.Unmount(container) initID := fmt.Sprintf("%s-init", container.ID) - sizeRw, err = driver.DiffSize(container.ID, initID) + sizeRw, err = daemon.driver.DiffSize(container.ID, initID) if err != nil { - logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err) // FIXME: GetSize should return an error. Not changing it now in case // there is a side-effect. 
sizeRw = -1 @@ -374,7 +419,7 @@ func (container *Container) trySetNetworkMount(destination string, path string) } func (container *Container) buildHostnameFile() error { - hostnamePath, err := container.GetRootResourcePath("hostname") + hostnamePath, err := container.getRootResourcePath("hostname") if err != nil { return err } @@ -386,51 +431,68 @@ func (container *Container) buildHostnameFile() error { return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) } -func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, error) { +func (daemon *Daemon) buildSandboxOptions(container *Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) { var ( - joinOptions []libnetwork.EndpointOption + sboxOptions []libnetwork.SandboxOption err error dns []string dnsSearch []string + dnsOptions []string ) - joinOptions = append(joinOptions, libnetwork.JoinOptionHostname(container.Config.Hostname), - libnetwork.JoinOptionDomainname(container.Config.Domainname)) + sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), + libnetwork.OptionDomainname(container.Config.Domainname)) if container.hostConfig.NetworkMode.IsHost() { - joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox()) + sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) + sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) + sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) + } else if daemon.execDriver.SupportsHooks() { + // OptionUseExternalKey is mandatory for userns support. + // But optional for non-userns support + sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) } - container.HostsPath, err = container.GetRootResourcePath("hosts") + container.HostsPath, err = container.getRootResourcePath("hosts") if err != nil { return nil, err } - joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath)) + sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) - container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") + container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf") if err != nil { return nil, err } - joinOptions = append(joinOptions, libnetwork.JoinOptionResolvConfPath(container.ResolvConfPath)) + sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) if len(container.hostConfig.DNS) > 0 { dns = container.hostConfig.DNS - } else if len(container.daemon.config.Dns) > 0 { - dns = container.daemon.config.Dns + } else if len(daemon.configStore.DNS) > 0 { + dns = daemon.configStore.DNS } for _, d := range dns { - joinOptions = append(joinOptions, libnetwork.JoinOptionDNS(d)) + sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) } if len(container.hostConfig.DNSSearch) > 0 { dnsSearch = container.hostConfig.DNSSearch - } else if len(container.daemon.config.DnsSearch) > 0 { - dnsSearch = container.daemon.config.DnsSearch + } else if len(daemon.configStore.DNSSearch) > 0 { + dnsSearch = daemon.configStore.DNSSearch } for _, ds := range dnsSearch { - joinOptions = append(joinOptions, libnetwork.JoinOptionDNSSearch(ds)) + sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) + } + + if len(container.hostConfig.DNSOptions) > 0 { + dnsOptions = container.hostConfig.DNSOptions + } else if len(daemon.configStore.DNSOptions) > 0 { + dnsOptions = daemon.configStore.DNSOptions 
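// Illustrative sketch, not part of the patch: the three DNS blocks in
// buildSandboxOptions (servers, search domains, and the resolver options
// whose block closes just below) all apply one precedence rule: a
// per-container HostConfig value, when set, replaces the daemon-wide
// default outright; the two lists are never merged. resolveDNS is a
// hypothetical helper expressing that rule.
package sketch

func resolveDNS(containerDNS, daemonDNS []string) []string {
	if len(containerDNS) > 0 {
		return containerDNS // per-container setting wins outright
	}
	return daemonDNS // daemon-wide fallback, possibly empty
}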
+ } + + for _, ds := range dnsOptions { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) } if container.NetworkSettings.SecondaryIPAddresses != nil { @@ -440,18 +502,38 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err } for _, a := range container.NetworkSettings.SecondaryIPAddresses { - joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(name, a.Addr)) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) } } + for _, extraHost := range container.hostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + parts := strings.SplitN(extraHost, ":", 2) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) + } + + // Link feature is supported only for the default bridge network. + // return if this call to build join options is not for default bridge network + if n.Name() != "bridge" { + return sboxOptions, nil + } + + ep, _ := container.getEndpointInNetwork(n) + if ep == nil { + return sboxOptions, nil + } + var childEndpoints, parentEndpoints []string - children, err := container.daemon.Children(container.Name) + children, err := daemon.children(container.Name) if err != nil { return nil, err } for linkAlias, child := range children { + if !isLinkable(child) { + return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) + } _, alias := path.Split(linkAlias) // allow access to the linked container via the alias, real name, and container hostname aliasList := alias + " " + child.Config.Hostname @@ -459,34 +541,30 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err if alias != child.Name[1:] { aliasList = aliasList + " " + child.Name[1:] } - joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(aliasList, child.NetworkSettings.IPAddress)) - if child.NetworkSettings.EndpointID != "" { - childEndpoints = append(childEndpoints, child.NetworkSettings.EndpointID) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks["bridge"].IPAddress)) + cEndpoint, _ := child.getEndpointInNetwork(n) + if cEndpoint != nil && cEndpoint.ID() != "" { + childEndpoints = append(childEndpoints, cEndpoint.ID()) } } - for _, extraHost := range container.hostConfig.ExtraHosts { - // allow IPv6 addresses in extra hosts; only split on first ":" - parts := strings.SplitN(extraHost, ":", 2) - joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1])) - } - - refs := container.daemon.ContainerGraph().RefPaths(container.ID) + bridgeSettings := container.NetworkSettings.Networks["bridge"] + refs := daemon.containerGraph().RefPaths(container.ID) for _, ref := range refs { if ref.ParentID == "0" { continue } - c, err := container.daemon.Get(ref.ParentID) + c, err := daemon.Get(ref.ParentID) if err != nil { logrus.Error(err) } - if c != nil && !container.daemon.config.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() { - logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress) - joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress)) - if c.NetworkSettings.EndpointID != "" { - parentEndpoints = append(parentEndpoints, c.NetworkSettings.EndpointID) + if c != nil && !daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() { + 
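// Illustrative sketch, not part of the patch: the ExtraHosts loop above
// splits "name:addr" on the first colon only, so IPv6 addresses, which
// contain colons themselves, survive intact.
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, extraHost := range []string{"db:10.0.0.5", "db6:fe80::1"} {
		parts := strings.SplitN(extraHost, ":", 2) // at most 2 pieces
		fmt.Printf("host=%s addr=%s\n", parts[0], parts[1])
	}
	// Output:
	// host=db addr=10.0.0.5
	// host=db6 addr=fe80::1
}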
logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, bridgeSettings.IPAddress) + sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(c.ID, ref.Name, bridgeSettings.IPAddress)) + if ep.ID() != "" { + parentEndpoints = append(parentEndpoints, ep.ID()) } } } @@ -498,18 +576,29 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err }, } - joinOptions = append(joinOptions, libnetwork.JoinOptionGeneric(linkOptions)) + sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) - return joinOptions, nil + return sboxOptions, nil } -func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) { +func isLinkable(child *Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks["bridge"] + return ok +} + +func (container *Container) getEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(container.Name, "/") + return n.EndpointByName(endpointName) +} + +func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) { if ep == nil { - return nil, fmt.Errorf("invalid endpoint while building port map info") + return nil, derr.ErrorCodeEmptyEndpoint } if networkSettings == nil { - return nil, fmt.Errorf("invalid networksettings while building port map info") + return nil, derr.ErrorCodeEmptyNetwork } driverInfo, err := ep.DriverInfo() @@ -522,10 +611,6 @@ func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork return networkSettings, nil } - if mac, ok := driverInfo[netlabel.MacAddress]; ok { - networkSettings.MacAddress = mac.(net.HardwareAddr).String() - } - networkSettings.Ports = nat.PortMap{} if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { @@ -533,7 +618,7 @@ func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork for _, tp := range exposedPorts { natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) if err != nil { - return nil, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) + return nil, derr.ErrorCodeParsingPort.WithArgs(tp.Port, err) } networkSettings.Ports[natPort] = nil } @@ -561,11 +646,11 @@ func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) { if ep == nil { - return nil, fmt.Errorf("invalid endpoint while building port map info") + return nil, derr.ErrorCodeEmptyEndpoint } if networkSettings == nil { - return nil, fmt.Errorf("invalid networksettings while building port map info") + return nil, derr.ErrorCodeEmptyNetwork } epInfo := ep.Info() @@ -574,117 +659,145 @@ func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwor return networkSettings, nil } - ifaceList := epInfo.InterfaceList() - if len(ifaceList) == 0 { + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = new(network.EndpointSettings) + } + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { return networkSettings, nil } - iface := ifaceList[0] - - ones, _ := iface.Address().Mask.Size() - networkSettings.IPAddress = iface.Address().IP.String() - networkSettings.IPPrefixLen = 
ones - - if iface.AddressIPv6().IP.To16() != nil { - onesv6, _ := iface.AddressIPv6().Mask.Size() - networkSettings.GlobalIPv6Address = iface.AddressIPv6().IP.String() - networkSettings.GlobalIPv6PrefixLen = onesv6 + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() } - if len(ifaceList) == 1 { - return networkSettings, nil - } - - networkSettings.SecondaryIPAddresses = make([]network.Address, 0, len(ifaceList)-1) - networkSettings.SecondaryIPv6Addresses = make([]network.Address, 0, len(ifaceList)-1) - for _, iface := range ifaceList[1:] { + if iface.Address() != nil { ones, _ := iface.Address().Mask.Size() - addr := network.Address{Addr: iface.Address().IP.String(), PrefixLen: ones} - networkSettings.SecondaryIPAddresses = append(networkSettings.SecondaryIPAddresses, addr) + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } - if iface.AddressIPv6().IP.To16() != nil { - onesv6, _ := iface.AddressIPv6().Mask.Size() - addrv6 := network.Address{Addr: iface.AddressIPv6().IP.String(), PrefixLen: onesv6} - networkSettings.SecondaryIPv6Addresses = append(networkSettings.SecondaryIPv6Addresses, addrv6) - } + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 } return networkSettings, nil } -func (container *Container) updateJoinInfo(ep libnetwork.Endpoint) error { +func (container *Container) updateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if _, err := container.buildPortMapInfo(ep, container.NetworkSettings); err != nil { + return err + } + epInfo := ep.Info() if epInfo == nil { // It is not an error to get an empty endpoint info return nil } - - container.NetworkSettings.Gateway = epInfo.Gateway().String() - if epInfo.GatewayIPv6().To16() != nil { - container.NetworkSettings.IPv6Gateway = epInfo.GatewayIPv6().String() + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() } - - container.NetworkSettings.SandboxKey = epInfo.SandboxKey() return nil } -func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libnetwork.Endpoint) error { - networkSettings := &network.Settings{NetworkID: n.ID(), EndpointID: ep.ID()} - - networkSettings, err := container.buildPortMapInfo(n, ep, networkSettings) - if err != nil { - return err +func (daemon *Daemon) updateNetworkSettings(container *Container, n libnetwork.Network) error { + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} } - networkSettings, err = container.buildEndpointInfo(n, ep, networkSettings) + for s := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(s) + if err != nil { + continue + } + + if sn.Name() == n.Name() { + // Avoid duplicate config + return nil + } + if !runconfig.NetworkMode(sn.Type()).IsPrivate() || + !runconfig.NetworkMode(n.Type()).IsPrivate() { + return runconfig.ErrConflictSharedNetwork + } + if runconfig.NetworkMode(sn.Name()).IsNone() || + runconfig.NetworkMode(n.Name()).IsNone() { + return 
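// Illustrative sketch, not part of the patch: buildEndpointInfo above now
// keys settings by network name and derives IPAddress/IPPrefixLen from the
// endpoint interface's *net.IPNet; the prefix length is just Mask.Size().
// Stdlib demonstration:
package main

import (
	"fmt"
	"net"
)

func main() {
	ip, ipnet, err := net.ParseCIDR("172.17.0.2/16")
	if err != nil {
		panic(err)
	}
	ones, _ := ipnet.Mask.Size()
	fmt.Println(ip.String(), ones) // 172.17.0.2 16
}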
runconfig.ErrConflictNoNetwork + } + } + container.NetworkSettings.Networks[n.Name()] = new(network.EndpointSettings) + + return nil +} + +func (daemon *Daemon) updateEndpointNetworkSettings(container *Container, n libnetwork.Network, ep libnetwork.Endpoint) error { + networkSettings, err := container.buildEndpointInfo(n, ep, container.NetworkSettings) if err != nil { return err } if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") { - networkSettings.Bridge = container.daemon.config.Bridge.Iface + networkSettings.Bridge = daemon.configStore.Bridge.Iface } - container.NetworkSettings = networkSettings + return nil +} + +func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() return nil } // UpdateNetwork is used to update the container's network (e.g. when linked containers // get removed/unlinked). -func (container *Container) UpdateNetwork() error { - n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID) +func (daemon *Daemon) updateNetwork(container *Container) error { + ctrl := daemon.netController + sid := container.NetworkSettings.SandboxID + + sb, err := ctrl.SandboxByID(sid) if err != nil { - return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err) + return derr.ErrorCodeNoSandbox.WithArgs(sid, err) } - ep, err := n.EndpointByID(container.NetworkSettings.EndpointID) + // Find if container is connected to the default bridge network + var n libnetwork.Network + for name := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(name) + if err != nil { + continue + } + if sn.Name() == "bridge" { + n = sn + break + } + } + + if n == nil { + // Not connected to the default bridge network; Nothing to do + return nil + } + + options, err := daemon.buildSandboxOptions(container, n) if err != nil { - return fmt.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err) + return derr.ErrorCodeNetworkUpdate.WithArgs(err) } - if err := ep.Leave(container.ID); err != nil { - return fmt.Errorf("endpoint leave failed: %v", err) - - } - - joinOptions, err := container.buildJoinOptions() - if err != nil { - return fmt.Errorf("Update network failed: %v", err) - } - - if err := ep.Join(container.ID, joinOptions...); err != nil { - return fmt.Errorf("endpoint join failed: %v", err) - } - - if err := container.updateJoinInfo(ep); err != nil { - return fmt.Errorf("Updating join info failed: %v", err) + if err := sb.Refresh(options...); err != nil { + return derr.ErrorCodeNetworkRefresh.WithArgs(sid, err) } return nil } -func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointOption, error) { +func (container *Container) buildCreateEndpointOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { var ( portSpecs = make(nat.PortSet) bindings = make(nat.PortMap) @@ -709,8 +822,6 @@ func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointO } } - container.NetworkSettings.PortMapping = nil - ports := make([]nat.Port, len(portSpecs)) var i int for p := range portSpecs { @@ -729,10 +840,15 @@ func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointO for i := 0; i < len(binding); i++ { pbCopy := pb.GetCopy() newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) - if err != nil { - return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", 
binding[i].HostPort, err) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() } - pbCopy.HostPort = uint16(newP.Int()) + if err != nil { + return nil, derr.ErrorCodeHostPort.WithArgs(binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) pbCopy.HostIP = net.ParseIP(binding[i].HostIP) pbList = append(pbList, pbCopy) } @@ -759,168 +875,165 @@ func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointO createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) } + if n.Name() == "bridge" || container.NetworkSettings.IsAnonymousEndpoint { + createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) + } + return createOptions, nil } -func parseService(controller libnetwork.NetworkController, service string) (string, string, string) { - dn := controller.Config().Daemon.DefaultNetwork - dd := controller.Config().Daemon.DefaultDriver +func (daemon *Daemon) allocateNetwork(container *Container) error { + controller := daemon.netController - snd := strings.Split(service, ".") - if len(snd) > 2 { - return strings.Join(snd[:len(snd)-2], "."), snd[len(snd)-2], snd[len(snd)-1] + // Cleanup any stale sandbox left over due to ungraceful daemon shutdown + if err := controller.SandboxDestroy(container.ID); err != nil { + logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) } - if len(snd) > 1 { - return snd[0], snd[1], dd - } - return snd[0], dn, dd -} -func createNetwork(controller libnetwork.NetworkController, dnet string, driver string) (libnetwork.Network, error) { - createOptions := []libnetwork.NetworkOption{} - genericOption := options.Generic{} - - // Bridge driver is special due to legacy reasons - if runconfig.NetworkMode(driver).IsBridge() { - genericOption[netlabel.GenericData] = map[string]interface{}{ - "BridgeName": dnet, - "AllowNonDefaultBridge": "true", + updateSettings := false + if len(container.NetworkSettings.Networks) == 0 { + mode := container.hostConfig.NetworkMode + if container.Config.NetworkDisabled || mode.IsContainer() { + return nil } - networkOption := libnetwork.NetworkOptionGeneric(genericOption) - createOptions = append(createOptions, networkOption) - } - return controller.NewNetwork(driver, dnet, createOptions...) 
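// Illustrative sketch, not part of the patch: the hunk above replaces the
// single-port HostPort with a start/end pair so a binding spec may name a
// range. Assuming the pkg/nat API exactly as the hunk itself uses it
// (NewPort, SplitProtoPort, Range), a spec expands like this: "8080/tcp"
// yields 8080,8080 and "8000-8005/tcp" yields 8000,8005.
package sketch

import "github.com/docker/docker/pkg/nat"

func hostPortRange(spec string) (int, int, error) {
	p, err := nat.NewPort(nat.SplitProtoPort(spec))
	if err != nil {
		return 0, 0, err
	}
	return p.Range()
}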
-} - -func (container *Container) secondaryNetworkRequired(primaryNetworkType string) bool { - switch primaryNetworkType { - case "bridge", "none", "host", "container": - return false - } - - if container.daemon.config.DisableBridge { - return false - } - - if container.Config.ExposedPorts != nil && len(container.Config.ExposedPorts) > 0 { - return true - } - if container.hostConfig.PortBindings != nil && len(container.hostConfig.PortBindings) > 0 { - return true - } - return false -} - -func (container *Container) AllocateNetwork() error { - mode := container.hostConfig.NetworkMode - controller := container.daemon.netController - if container.Config.NetworkDisabled || mode.IsContainer() { - return nil - } - - networkDriver := string(mode) - service := container.Config.PublishService - networkName := mode.NetworkName() - if mode.IsDefault() { - if service != "" { - service, networkName, networkDriver = parseService(controller, service) - } else { + networkName := mode.NetworkName() + if mode.IsDefault() { networkName = controller.Config().Daemon.DefaultNetwork - networkDriver = controller.Config().Daemon.DefaultDriver } - } else if service != "" { - return fmt.Errorf("conflicting options: publishing a service and network mode") + if mode.IsUserDefined() { + n, err := daemon.FindNetwork(networkName) + if err != nil { + return err + } + networkName = n.Name() + } + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + container.NetworkSettings.Networks[networkName] = new(network.EndpointSettings) + updateSettings = true } - if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.config.DisableBridge { + for n := range container.NetworkSettings.Networks { + if err := daemon.connectToNetwork(container, n, updateSettings); err != nil { + return err + } + } + + return container.writeHostConfig() +} + +func (daemon *Daemon) getNetworkSandbox(container *Container) libnetwork.Sandbox { + var sb libnetwork.Sandbox + daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { + if s.ContainerID() == container.ID { + sb = s + return true + } + return false + }) + return sb +} + +// ConnectToNetwork connects a container to a netork +func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error { + if !container.Running { + return derr.ErrorCodeNotRunning.WithArgs(container.ID) + } + if err := daemon.connectToNetwork(container, idOrName, true); err != nil { + return err + } + if err := container.toDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + return nil +} + +func (daemon *Daemon) connectToNetwork(container *Container, idOrName string, updateSettings bool) (err error) { + if container.hostConfig.NetworkMode.IsContainer() { + return runconfig.ErrConflictSharedNetwork + } + + if runconfig.NetworkMode(idOrName).IsBridge() && + daemon.configStore.DisableBridge { container.Config.NetworkDisabled = true return nil } - if service == "" { - // dot character "." has a special meaning to support SERVICE[.NETWORK] format. - // For backward compatiblity, replacing "." with "-", instead of failing - service = strings.Replace(container.Name, ".", "-", -1) - // Service names dont like "/" in them. 
removing it instead of failing for backward compatibility - service = strings.Replace(service, "/", "", -1) - } + controller := daemon.netController - if container.secondaryNetworkRequired(networkDriver) { - // Configure Bridge as secondary network for port binding purposes - if err := container.configureNetwork("bridge", service, "bridge", false); err != nil { - return err - } - } - - if err := container.configureNetwork(networkName, service, networkDriver, mode.IsDefault()); err != nil { + n, err := daemon.FindNetwork(idOrName) + if err != nil { return err } - return container.WriteHostConfig() -} - -func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error { - controller := container.daemon.netController - n, err := controller.NetworkByName(networkName) - if err != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok || !canCreateNetwork { - return err - } - - if n, err = createNetwork(controller, networkName, networkDriver); err != nil { + if updateSettings { + if err := daemon.updateNetworkSettings(container, n); err != nil { return err } } - ep, err := n.EndpointByName(service) + ep, err := container.getEndpointInNetwork(n) + if err == nil { + return fmt.Errorf("container already connected to network %s", idOrName) + } + + if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok { + return err + } + + createOptions, err := container.buildCreateEndpointOptions(n) if err != nil { - if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok { + return err + } + + endpointName := strings.TrimPrefix(container.Name, "/") + ep, err = n.CreateEndpoint(endpointName, createOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(); e != nil { + logrus.Warnf("Could not rollback container connection to network %s", idOrName) + } + } + }() + + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { + return err + } + + sb := daemon.getNetworkSandbox(container) + if sb == nil { + options, err := daemon.buildSandboxOptions(container, n) + if err != nil { return err } - - createOptions, err := container.buildCreateEndpointOptions() + sb, err = controller.NewSandbox(container.ID, options...) if err != nil { return err } - ep, err = n.CreateEndpoint(service, createOptions...) - if err != nil { - return err - } + container.updateSandboxNetworkSettings(sb) } - if err := container.updateNetworkSettings(n, ep); err != nil { + if err := ep.Join(sb); err != nil { return err } - joinOptions, err := container.buildJoinOptions() - if err != nil { - return err - } - - if err := ep.Join(container.ID, joinOptions...); err != nil { - return err - } - - if err := container.updateJoinInfo(ep); err != nil { - return fmt.Errorf("Updating join info failed: %v", err) + if err := container.updateJoinInfo(n, ep); err != nil { + return derr.ErrorCodeJoinInfo.WithArgs(err) } return nil } -func (container *Container) initializeNetworking() error { +func (daemon *Daemon) initializeNetworking(container *Container) error { var err error - // Make sure NetworkMode has an acceptable value before - // initializing networking. 
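// Illustrative sketch, not part of the patch: connectToNetwork above uses a
// named return (err error) so its deferred closure can roll the endpoint
// back when any step after CreateEndpoint fails. The idiom in isolation;
// create and join are hypothetical stand-ins for n.CreateEndpoint and
// ep.Join.
package sketch

type endpoint interface {
	Delete() error
}

func connect(create func() (endpoint, error), join func(endpoint) error) (err error) {
	ep, err := create()
	if err != nil {
		return err
	}
	defer func() {
		if err != nil { // reads the final value of the named return
			ep.Delete() // best-effort rollback; failure is only logged upstream
		}
	}()
	return join(ep)
}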
- if container.hostConfig.NetworkMode == runconfig.NetworkMode("") { - container.hostConfig.NetworkMode = runconfig.NetworkMode("default") - } if container.hostConfig.NetworkMode.IsContainer() { // we need to get the hosts files from the container to join - nc, err := container.getNetworkedContainer() + nc, err := daemon.getNetworkedContainer(container.ID, container.hostConfig.NetworkMode.ConnectedContainer()) if err != nil { return err } @@ -946,264 +1059,371 @@ func (container *Container) initializeNetworking() error { } - if err := container.AllocateNetwork(); err != nil { + if err := daemon.allocateNetwork(container); err != nil { return err } return container.buildHostnameFile() } -func (container *Container) ExportRw() (archive.Archive, error) { - if container.daemon == nil { - return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) +// called from the libcontainer pre-start hook to set the network +// namespace configuration linkage to the libnetwork "sandbox" entity +func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error { + path := fmt.Sprintf("/proc/%d/ns/net", pid) + var sandbox libnetwork.Sandbox + search := libnetwork.SandboxContainerWalker(&sandbox, containerID) + daemon.netController.WalkSandboxes(search) + if sandbox == nil { + return derr.ErrorCodeNoSandbox.WithArgs(containerID, "no sandbox found") } - archive, err := container.daemon.Diff(container) - if err != nil { - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - return err - }), - nil + + return sandbox.SetKey(path) } -func (container *Container) getIpcContainer() (*Container, error) { +func (daemon *Daemon) getIpcContainer(container *Container) (*Container, error) { containerID := container.hostConfig.IpcMode.Container() - c, err := container.daemon.Get(containerID) + c, err := daemon.Get(containerID) if err != nil { return nil, err } if !c.IsRunning() { - return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + return nil, derr.ErrorCodeIPCRunning } return c, nil } func (container *Container) setupWorkingDirectory() error { - if container.Config.WorkingDir != "" { - container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + if container.Config.WorkingDir == "" { + return nil + } + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) - pth, err := container.GetResourcePath(container.Config.WorkingDir) - if err != nil { + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + pthInfo, err := os.Stat(pth) + if err != nil { + if !os.IsNotExist(err) { return err } - pthInfo, err := os.Stat(pth) - if err != nil { - if !os.IsNotExist(err) { - return err - } - - if err := system.MkdirAll(pth, 0755); err != nil { - return err - } - } - if pthInfo != nil && !pthInfo.IsDir() { - return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + if err := system.MkdirAll(pth, 0755); err != nil { + return err } } + if pthInfo != nil && !pthInfo.IsDir() { + return derr.ErrorCodeNotADir.WithArgs(container.Config.WorkingDir) + } return nil } -func (container *Container) getNetworkedContainer() (*Container, error) { - parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) - switch parts[0] { - case "container": - if len(parts) != 2 { - return nil, fmt.Errorf("no container specified to join network") - } - nc, err := container.daemon.Get(parts[1]) - if 
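// Illustrative sketch, not part of the patch: setNetworkNamespaceKey above
// hands libnetwork the network-namespace path of the container's init
// process, which is addressable purely by PID.
package sketch

import "fmt"

func netnsPath(pid int) string {
	return fmt.Sprintf("/proc/%d/ns/net", pid) // e.g. /proc/4242/ns/net
}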
err != nil { - return nil, err - } - if container == nc { - return nil, fmt.Errorf("cannot join own network") - } - if !nc.IsRunning() { - return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) - } - return nc, nil - default: - return nil, fmt.Errorf("network mode not set to container") +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*Container, error) { + nc, err := daemon.Get(connectedContainerID) + if err != nil { + return nil, err } + if containerID == nc.ID { + return nil, derr.ErrorCodeJoinSelf + } + if !nc.IsRunning() { + return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID) + } + return nc, nil } -func (container *Container) ReleaseNetwork() { +func (daemon *Daemon) releaseNetwork(container *Container) { if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { return } - eid := container.NetworkSettings.EndpointID - nid := container.NetworkSettings.NetworkID + sid := container.NetworkSettings.SandboxID + networks := container.NetworkSettings.Networks + for n := range networks { + networks[n] = &network.EndpointSettings{} + } - container.NetworkSettings = &network.Settings{} + container.NetworkSettings = &network.Settings{Networks: networks} - if nid == "" || eid == "" { + if sid == "" || len(networks) == 0 { return } - n, err := container.daemon.netController.NetworkByID(nid) + sb, err := daemon.netController.SandboxByID(sid) if err != nil { - logrus.Errorf("error locating network id %s: %v", nid, err) + logrus.Errorf("error locating sandbox id %s: %v", sid, err) return } - ep, err := n.EndpointByID(eid) - if err != nil { - logrus.Errorf("error locating endpoint id %s: %v", eid, err) - return - } - - switch { - case container.hostConfig.NetworkMode.IsHost(): - if err := ep.Leave(container.ID); err != nil { - logrus.Errorf("Error leaving endpoint id %s for container %s: %v", eid, container.ID, err) - return - } - default: - if err := container.daemon.netController.LeaveAll(container.ID); err != nil { - logrus.Errorf("Leave all failed for %s: %v", container.ID, err) - return - } - } - - // In addition to leaving all endpoints, delete implicitly created endpoint - if container.Config.PublishService == "" { - if err := ep.Delete(); err != nil { - logrus.Errorf("deleting endpoint failed: %v", err) - } + if err := sb.Delete(); err != nil { + logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) } } -func (container *Container) UnmountVolumes(forceSyscall bool) error { - var volumeMounts []mountPoint - - for _, mntPoint := range container.MountPoints { - dest, err := container.GetResourcePath(mntPoint.Destination) - if err != nil { - return err - } - - volumeMounts = append(volumeMounts, mountPoint{Destination: dest, Volume: mntPoint.Volume}) +// DisconnectFromNetwork disconnects a container from a network +func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error { + if !container.Running { + return derr.ErrorCodeNotRunning.WithArgs(container.ID) } + if err := container.disconnectFromNetwork(n); err != nil { + return err + } + + if err := container.toDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + return nil +} + +func (container *Container) disconnectFromNetwork(n libnetwork.Network) error { + var ( + ep libnetwork.Endpoint + sbox libnetwork.Sandbox + ) + + s := func(current libnetwork.Endpoint) bool { + epInfo := current.Info() + if epInfo == nil { + return false + 
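// Illustrative sketch, not part of the patch: disconnectFromNetwork above
// uses libnetwork's walker idiom, where the callback returns true to stop
// the walk once it has captured what it was looking for. The same idiom
// over a plain slice with hypothetical types:
package main

import "fmt"

type endpoint struct{ id, containerID string }

func walkEndpoints(eps []endpoint, fn func(endpoint) bool) {
	for _, e := range eps {
		if fn(e) {
			return // a true return stops the walk early
		}
	}
}

func main() {
	var found *endpoint
	eps := []endpoint{{"ep1", "c1"}, {"ep2", "c2"}}
	walkEndpoints(eps, func(e endpoint) bool {
		if e.containerID == "c2" {
			found = &e
			return true
		}
		return false
	})
	fmt.Println(found.id) // ep2
}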
} + if sb := epInfo.Sandbox(); sb != nil { + if sb.ContainerID() == container.ID { + ep = current + sbox = sb + return true + } + } + return false + } + n.WalkEndpoints(s) + + if ep == nil { + return fmt.Errorf("container %s is not connected to the network", container.ID) + } + + if err := ep.Leave(sbox); err != nil { + return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) + } + + if err := ep.Delete(); err != nil { + return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) + } + + delete(container.NetworkSettings.Networks, n.Name()) + return nil +} + +// appendNetworkMounts appends any network mounts to the array of mount points passed in +func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { for _, mnt := range container.networkMounts() { dest, err := container.GetResourcePath(mnt.Destination) if err != nil { - return err + return nil, err } - - volumeMounts = append(volumeMounts, mountPoint{Destination: dest}) + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest}) } - - for _, volumeMount := range volumeMounts { - if forceSyscall { - syscall.Unmount(volumeMount.Destination, 0) - } - - if volumeMount.Volume != nil { - if err := volumeMount.Volume.Unmount(); err != nil { - return err - } - } - } - - return nil -} - -func (container *Container) PrepareStorage() error { - return nil -} - -func (container *Container) CleanupStorage() error { - return nil + return volumeMounts, nil } func (container *Container) networkMounts() []execdriver.Mount { var mounts []execdriver.Mount + shared := container.hostConfig.NetworkMode.IsContainer() if container.ResolvConfPath != "" { - label.SetFileLabel(container.ResolvConfPath, container.MountLabel) - mounts = append(mounts, execdriver.Mount{ - Source: container.ResolvConfPath, - Destination: "/etc/resolv.conf", - Writable: !container.hostConfig.ReadonlyRootfs, - Private: true, - }) + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + writable := !container.hostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, execdriver.Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Private: true, + }) + } } if container.HostnamePath != "" { - label.SetFileLabel(container.HostnamePath, container.MountLabel) + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + writable := !container.hostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, execdriver.Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Private: true, + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + label.Relabel(container.HostsPath, container.MountLabel, shared) + 
writable := !container.hostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, execdriver.Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Private: true, + }) + } + } + return mounts +} + +func (container *Container) copyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + path, err := v.Mount() + if err != nil { + return err + } + + if err := copyExistingContents(rootfs, path); err != nil { + return err + } + + return v.Unmount() +} + +func (container *Container) shmPath() (string, error) { + return container.getRootResourcePath("shm") +} +func (container *Container) mqueuePath() (string, error) { + return container.getRootResourcePath("mqueue") +} + +func (container *Container) hasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +func (daemon *Daemon) setupIpcDirs(container *Container) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !container.hasMountFor("/dev/shm") { + shmPath, err := container.shmPath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel("mode=1777,size=65536k", container.getMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + if !container.hasMountFor("/dev/mqueue") { + mqueuePath, err := container.mqueuePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(mqueuePath, 0700, rootUID, rootGID); err != nil { + return err + } + + if err := syscall.Mount("mqueue", mqueuePath, "mqueue", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), ""); err != nil { + return fmt.Errorf("mounting mqueue mqueue : %s", err) + } + if err := os.Chown(mqueuePath, rootUID, rootGID); err != nil { + return err + } + } + + return nil +} + +func (container *Container) unmountIpcMounts(unmount func(pth string) error) { + if container.hostConfig.IpcMode.IsContainer() || container.hostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.hasMountFor("/dev/shm") { + shmPath, err := container.shmPath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + + } + } + + if !container.hasMountFor("/dev/mqueue") { + mqueuePath, err := container.mqueuePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if mqueuePath != "" { + if err := unmount(mqueuePath); err != nil { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", mqueuePath, err)) + } + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +func (container *Container) ipcMounts() []execdriver.Mount { + var mounts []execdriver.Mount + + if !container.hasMountFor("/dev/shm") { + 
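// Illustrative sketch, not part of the patch: setupIpcDirs above gives each
// container a private shm as a fresh tmpfs mount with noexec/nosuid/nodev,
// world-writable sticky mode, and a 64 MB cap. The raw call in isolation
// (Linux only, requires privilege; the target directory must already
// exist).
package sketch

import "syscall"

func mountShm(target string) error {
	flags := uintptr(syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV)
	return syscall.Mount("shm", target, "tmpfs", flags, "mode=1777,size=65536k")
}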
label.SetFileLabel(container.ShmPath, container.MountLabel) mounts = append(mounts, execdriver.Mount{ - Source: container.HostnamePath, - Destination: "/etc/hostname", - Writable: !container.hostConfig.ReadonlyRootfs, + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, Private: true, }) } - if container.HostsPath != "" { - label.SetFileLabel(container.HostsPath, container.MountLabel) + + if !container.hasMountFor("/dev/mqueue") { + label.SetFileLabel(container.MqueuePath, container.MountLabel) mounts = append(mounts, execdriver.Mount{ - Source: container.HostsPath, - Destination: "/etc/hosts", - Writable: !container.hostConfig.ReadonlyRootfs, + Source: container.MqueuePath, + Destination: "/dev/mqueue", + Writable: true, Private: true, }) } return mounts } -func (container *Container) addBindMountPoint(name, source, destination string, rw bool) { - container.MountPoints[destination] = &mountPoint{ - Name: name, - Source: source, - Destination: destination, - RW: rw, - } -} - -func (container *Container) addLocalMountPoint(name, destination string, rw bool) { - container.MountPoints[destination] = &mountPoint{ - Name: name, - Driver: volume.DefaultDriverName, - Destination: destination, - RW: rw, - } -} - -func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) { - container.MountPoints[destination] = &mountPoint{ - Name: vol.Name(), - Driver: vol.DriverName(), - Destination: destination, - RW: rw, - Volume: vol, - } -} - -func (container *Container) isDestinationMounted(destination string) bool { - return container.MountPoints[destination] != nil -} - -func (container *Container) prepareMountPoints() error { - for _, config := range container.MountPoints { - if len(config.Driver) > 0 { - v, err := createVolume(config.Name, config.Driver) - if err != nil { - return err - } - config.Volume = v - } - } - return nil -} - -func (container *Container) removeMountPoints() error { - for _, m := range container.MountPoints { - if m.Volume != nil { - if err := removeVolume(m.Volume); err != nil { - return err - } - } - } - return nil +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) } diff --git a/vendor/github.com/docker/docker/daemon/container_windows.go b/vendor/github.com/docker/docker/daemon/container_windows.go index 410f7d66..fd2dcb9b 100644 --- a/vendor/github.com/docker/docker/daemon/container_windows.go +++ b/vendor/github.com/docker/docker/daemon/container_windows.go @@ -3,21 +3,20 @@ package daemon import ( - "fmt" - "path/filepath" "strings" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/daemon/graphdriver/windows" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" - "github.com/microsoft/hcsshim" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" ) -// This is deliberately empty on Windows as the default path will be set by +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by // the container. Docker has no context of what the default path should be. const DefaultPathEnv = "" +// Container holds fields specific to the Windows implementation. See +// CommonContainer for standard fields common to all containers. 
type Container struct { CommonContainer @@ -28,15 +27,7 @@ func killProcessDirectly(container *Container) error { return nil } -func (container *Container) setupContainerDns() error { - return nil -} - -func (container *Container) updateParentsHosts() error { - return nil -} - -func (container *Container) setupLinkedContainers() ([]string, error) { +func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) { return nil, nil } @@ -45,7 +36,17 @@ func (container *Container) createDaemonEnvironment(linkedEnv []string) []string return container.Config.Env } -func (container *Container) initializeNetworking() error { +func (daemon *Daemon) initializeNetworking(container *Container) error { + return nil +} + +// ConnectToNetwork connects a container to the network +func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error { + return nil +} + +// DisconnectFromNetwork disconnects a container from, the network +func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error { return nil } @@ -53,34 +54,37 @@ func (container *Container) setupWorkingDirectory() error { return nil } -func populateCommand(c *Container, env []string) error { +func (daemon *Daemon) populateCommand(c *Container, env []string) error { en := &execdriver.Network{ - Mtu: c.daemon.config.Mtu, Interface: nil, } parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) switch parts[0] { - case "none": case "default", "": // empty string to support existing containers if !c.Config.NetworkDisabled { en.Interface = &execdriver.NetworkInterface{ - MacAddress: c.Config.MacAddress, - Bridge: c.daemon.config.Bridge.VirtualSwitchName, + MacAddress: c.Config.MacAddress, + Bridge: daemon.configStore.Bridge.VirtualSwitchName, + PortBindings: c.hostConfig.PortBindings, + + // TODO Windows. Include IPAddress. There already is a + // property IPAddress on execDrive.CommonNetworkInterface, + // but there is no CLI option in docker to pass through + // an IPAddress on docker run. } } default: - return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) + return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.hostConfig.NetworkMode) } - pid := &execdriver.Pid{} - - // TODO Windows. This can probably be factored out. - pid.HostPid = c.hostConfig.PidMode.IsHost() - - // TODO Windows. Resource controls to be implemented later. - resources := &execdriver.Resources{} + // TODO Windows. More resource controls to be implemented later. + resources := &execdriver.Resources{ + CommonResources: execdriver.CommonResources{ + CPUShares: c.hostConfig.CPUShares, + }, + } // TODO Windows. Further refactoring required (privileged/user) processConfig := execdriver.ProcessConfig{ @@ -94,118 +98,95 @@ func populateCommand(c *Container, env []string) error { processConfig.Env = env - var layerFolder string var layerPaths []string - - // The following is specific to the Windows driver. We do this to - // enable VFS to continue operating for development purposes. 
- if wd, ok := c.daemon.driver.(*windows.WindowsGraphDriver); ok { - var err error - var img *image.Image - var ids []string - - if img, err = c.daemon.graph.Get(c.ImageID); err != nil { - return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err) - } - if ids, err = c.daemon.graph.ParentLayerIds(img); err != nil { - return fmt.Errorf("Failed to get parentlayer ids %s", img.ID) - } - layerPaths = wd.LayerIdsToPaths(ids) - layerFolder = filepath.Join(wd.Info().HomeDir, filepath.Base(c.ID)) + img, err := daemon.graph.Get(c.ImageID) + if err != nil { + return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err) } + for i := img; i != nil && err == nil; i, err = daemon.graph.GetParent(i) { + lp, err := daemon.driver.Get(i.ID, "") + if err != nil { + return derr.ErrorCodeGetLayer.WithArgs(daemon.driver.String(), i.ID, err) + } + layerPaths = append(layerPaths, lp) + err = daemon.driver.Put(i.ID) + if err != nil { + return derr.ErrorCodePutLayer.WithArgs(daemon.driver.String(), i.ID, err) + } + } + m, err := daemon.driver.GetMetadata(c.ID) + if err != nil { + return derr.ErrorCodeGetLayerMetadata.WithArgs(err) + } + layerFolder := m["dir"] - // TODO Windows: Factor out remainder of unused fields. c.command = &execdriver.Command{ - ID: c.ID, - Rootfs: c.RootfsPath(), - ReadonlyRootfs: c.hostConfig.ReadonlyRootfs, - InitPath: "/.dockerinit", - WorkingDir: c.Config.WorkingDir, - Network: en, - Pid: pid, - Resources: resources, - CapAdd: c.hostConfig.CapAdd.Slice(), - CapDrop: c.hostConfig.CapDrop.Slice(), - ProcessConfig: processConfig, - ProcessLabel: c.GetProcessLabel(), - MountLabel: c.GetMountLabel(), - FirstStart: !c.HasBeenStartedBefore, - LayerFolder: layerFolder, - LayerPaths: layerPaths, + CommonCommand: execdriver.CommonCommand{ + ID: c.ID, + Rootfs: c.rootfsPath(), + InitPath: "/.dockerinit", + WorkingDir: c.Config.WorkingDir, + Network: en, + MountLabel: c.getMountLabel(), + Resources: resources, + ProcessConfig: processConfig, + ProcessLabel: c.getProcessLabel(), + }, + FirstStart: !c.HasBeenStartedBefore, + LayerFolder: layerFolder, + LayerPaths: layerPaths, + Hostname: c.Config.Hostname, + Isolated: c.hostConfig.Isolation.IsHyperV(), } return nil } -// GetSize, return real size, virtual size -func (container *Container) GetSize() (int64, int64) { +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(container *Container) (int64, int64) { // TODO Windows return 0, 0 } -func (container *Container) AllocateNetwork() error { +// setNetworkNamespaceKey is a no-op on Windows. +func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error { return nil } -func (container *Container) ExportRw() (archive.Archive, error) { - if container.IsRunning() { - return nil, fmt.Errorf("Cannot export a running container.") - } - // TODO Windows. Implementation (different to Linux) - return nil, nil -} - -func (container *Container) UpdateNetwork() error { +// allocateNetwork is a no-op on Windows. +func (daemon *Daemon) allocateNetwork(container *Container) error { return nil } -func (container *Container) ReleaseNetwork() { -} - -func (container *Container) RestoreNetwork() error { +func (daemon *Daemon) updateNetwork(container *Container) error { return nil } -func (container *Container) UnmountVolumes(forceSyscall bool) error { +func (daemon *Daemon) releaseNetwork(container *Container) { +} + +// appendNetworkMounts appends any network mounts to the array of mount points passed in. 
+// Windows does not support network mounts (not to be confused with SMB network mounts), so +// this is a no-op. +func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { + return volumeMounts, nil +} + +func (daemon *Daemon) setupIpcDirs(container *Container) error { return nil } -func (container *Container) PrepareStorage() error { - if wd, ok := container.daemon.driver.(*windows.WindowsGraphDriver); ok { - // Get list of paths to parent layers. - var ids []string - if container.ImageID != "" { - img, err := container.daemon.graph.Get(container.ImageID) - if err != nil { - return err - } +func (container *Container) unmountIpcMounts(unmount func(pth string) error) { +} - ids, err = container.daemon.graph.ParentLayerIds(img) - if err != nil { - return err - } - } - - if err := hcsshim.PrepareLayer(wd.Info(), container.ID, wd.LayerIdsToPaths(ids)); err != nil { - return err - } - } +func detachMounted(path string) error { return nil } -func (container *Container) CleanupStorage() error { - if wd, ok := container.daemon.driver.(*windows.WindowsGraphDriver); ok { - return hcsshim.UnprepareLayer(wd.Info(), container.ID) - } +func (container *Container) ipcMounts() []execdriver.Mount { return nil } -// TODO Windows. This can be further factored out. Used in daemon.go -func (container *Container) prepareMountPoints() error { - return nil -} - -// TODO Windows. This can be further factored out. Used in delete.go -func (container *Container) removeMountPoints() error { - return nil +func getDefaultRouteMtu() (int, error) { + return -1, errSystemNotSupported } diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go index 1d0a9a5b..fb20d213 100644 --- a/vendor/github.com/docker/docker/daemon/create.go +++ b/vendor/github.com/docker/docker/daemon/create.go @@ -1,110 +1,119 @@ package daemon import ( - "fmt" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/graph" + "github.com/docker/docker/api/types" + derr "github.com/docker/docker/errors" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" "github.com/opencontainers/runc/libcontainer/label" ) -func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig) (string, []string, error) { - if config == nil { - return "", nil, fmt.Errorf("Config cannot be empty in order to create a container") +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *runconfig.Config + HostConfig *runconfig.HostConfig + AdjustCPUShares bool +} + +// ContainerCreate takes configs and creates a container. 
+func (daemon *Daemon) ContainerCreate(params *ContainerCreateConfig) (types.ContainerCreateResponse, error) { + if params.Config == nil { + return types.ContainerCreateResponse{}, derr.ErrorCodeEmptyConfig } - daemon.adaptContainerSettings(hostConfig) - warnings, err := daemon.verifyContainerSettings(hostConfig, config) + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config) if err != nil { - return "", warnings, err + return types.ContainerCreateResponse{ID: "", Warnings: warnings}, err } - container, buildWarnings, err := daemon.Create(config, hostConfig, name) + daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + + container, err := daemon.create(params) if err != nil { - if daemon.Graph().IsNotExist(err, config.Image) { - _, tag := parsers.ParseRepositoryTag(config.Image) - if tag == "" { - tag = graph.DefaultTag - } - return "", warnings, fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag) - } - return "", warnings, err + return types.ContainerCreateResponse{ID: "", Warnings: warnings}, daemon.graphNotExistToErrcode(params.Config.Image, err) } - warnings = append(warnings, buildWarnings...) - - return container.ID, warnings, nil + return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil } // Create creates a new container from the given configuration with a given name. -func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) { +func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, retErr error) { var ( container *Container - warnings []string img *image.Image imgID string err error ) - if config.Image != "" { - img, err = daemon.repositories.LookupImage(config.Image) + if params.Config.Image != "" { + img, err = daemon.repositories.LookupImage(params.Config.Image) if err != nil { - return nil, nil, err + return nil, err } if err = daemon.graph.CheckDepth(img); err != nil { - return nil, nil, err + return nil, err } imgID = img.ID } - if err := daemon.mergeAndVerifyConfig(config, img); err != nil { - return nil, nil, err + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err } - if !config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled { - warnings = append(warnings, "IPv4 forwarding is disabled.") + + if params.HostConfig == nil { + params.HostConfig = &runconfig.HostConfig{} } - if hostConfig == nil { - hostConfig = &runconfig.HostConfig{} - } - if hostConfig.SecurityOpt == nil { - hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) + if params.HostConfig.SecurityOpt == nil { + params.HostConfig.SecurityOpt, err = daemon.generateSecurityOpt(params.HostConfig.IpcMode, params.HostConfig.PidMode) if err != nil { - return nil, nil, err + return nil, err } } - if container, err = daemon.newContainer(name, config, imgID); err != nil { - return nil, nil, err + if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil { + return nil, err } + defer func() { + if retErr != nil { + if err := daemon.rm(container, false); err != nil { + logrus.Errorf("Clean up Error! 
Cannot destroy container %s: %v", container.ID, err) + } + } + }() + if err := daemon.Register(container); err != nil { - return nil, nil, err + return nil, err } if err := daemon.createRootfs(container); err != nil { - return nil, nil, err + return nil, err } - if err := daemon.setHostConfig(container, hostConfig); err != nil { - return nil, nil, err + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err } - if err := container.Mount(); err != nil { - return nil, nil, err - } - defer container.Unmount() + defer func() { + if retErr != nil { + if err := daemon.removeMountPoints(container, true); err != nil { + logrus.Error(err) + } + } + }() - if err := createContainerPlatformSpecificSettings(container, config); err != nil { - return nil, nil, err + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil { + return nil, err } - if err := container.ToDisk(); err != nil { + if err := container.toDiskLocking(); err != nil { logrus.Errorf("Error saving new container to disk: %v", err) - return nil, nil, err + return nil, err } - container.LogEvent("create") - return container, warnings, nil + daemon.LogContainerEvent(container, "create") + return container, nil } -func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) { +func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) { if ipcMode.IsHost() || pidMode.IsHost() { return label.DisableSecOpt(), nil } @@ -118,3 +127,22 @@ func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run } return nil, nil } + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the remote API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts) + if err != nil { + return nil, err + } + + // keep "docker run -v existing_volume:/foo --volume-driver other_driver" work + if (driverName != "" && v.DriverName() != driverName) || (driverName == "" && v.DriverName() != volume.DefaultDriverName) { + return nil, derr.ErrorVolumeNameTaken.WithArgs(name, v.DriverName()) + } + return volumeToAPIType(v), nil +} diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go index 124cec0a..4975c350 100644 --- a/vendor/github.com/docker/docker/daemon/create_unix.go +++ b/vendor/github.com/docker/docker/daemon/create_unix.go @@ -3,30 +3,28 @@ package daemon import ( - "fmt" "os" "path/filepath" - "strings" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" "github.com/opencontainers/runc/libcontainer/label" ) // createContainerPlatformSpecificSettings performs platform specific container create functionality -func createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config) error { +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + for spec := range config.Volumes { - var ( 
- name, destination string - parts = strings.Split(spec, ":") - ) - switch len(parts) { - case 2: - name, destination = parts[0], filepath.Clean(parts[1]) - default: - name = stringid.GenerateNonCryptoID() - destination = filepath.Clean(parts[0]) - } + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + // Skip volumes for which we already have something mounted on that // destination because of a --volume-from. if container.isDestinationMounted(destination) { @@ -39,19 +37,33 @@ func createContainerPlatformSpecificSettings(container *Container, config *runco stat, err := os.Stat(path) if err == nil && !stat.IsDir() { - return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + return derr.ErrorCodeMountOverFile.WithArgs(path) } - v, err := createVolume(name, config.VolumeDriver) + volumeDriver := hostConfig.VolumeDriver + if destination != "" && img != nil { + if _, ok := img.ContainerConfig.Volumes[destination]; ok { + // check for whether bind is not specified and then set to local + if _, ok := container.MountPoints[destination]; !ok { + volumeDriver = volume.DefaultDriverName + } + } + } + + v, err := daemon.createVolume(name, volumeDriver, nil) if err != nil { return err } - if err := label.Relabel(v.Path(), container.MountLabel, "z"); err != nil { + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { return err } - if err := container.copyImagePathContent(v, destination); err != nil { - return err + // never attempt to copy existing content in a container FS to a shared volume + if v.DriverName() == volume.DefaultDriverName { + if err := container.copyImagePathContent(v, destination); err != nil { + return err + } } container.addMountPointWithVolume(destination, v, true) diff --git a/vendor/github.com/docker/docker/daemon/create_windows.go b/vendor/github.com/docker/docker/daemon/create_windows.go index 401d68d8..a95e667b 100644 --- a/vendor/github.com/docker/docker/daemon/create_windows.go +++ b/vendor/github.com/docker/docker/daemon/create_windows.go @@ -1,10 +1,83 @@ package daemon import ( + "fmt" + + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" ) // createContainerPlatformSpecificSettings performs platform specific container create functionality -func createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config) error { +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error { + for spec := range config.Volumes { + + mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver) + if err != nil { + return fmt.Errorf("Unrecognised volume spec: %v", err) + } + + // If the mountpoint doesn't have a name, generate one. + if len(mp.Name) == 0 { + mp.Name = stringid.GenerateNonCryptoID() + } + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.isDestinationMounted(mp.Destination) { + continue + } + + volumeDriver := hostConfig.VolumeDriver + if mp.Destination != "" && img != nil { + if _, ok := img.ContainerConfig.Volumes[mp.Destination]; ok { + // check for whether bind is not specified and then set to local + if _, ok := container.MountPoints[mp.Destination]; !ok { + volumeDriver = volume.DefaultDriverName + } + } + } + + // Create the volume in the volume driver. 
If it doesn't exist, + // a new one will be created. + v, err := daemon.createVolume(mp.Name, volumeDriver, nil) + if err != nil { + return err + } + + // FIXME Windows: This code block is present in the Linux version and + // allows the contents to be copied to the container FS prior to it + // being started. However, the function utilises the FollowSymLinkInScope + // path which does not cope with Windows volume-style file paths. There + // is a separate effort to resolve this (@swernli), so this processing + // is deferred for now. A case where this would be useful is when + // a dockerfile includes a VOLUME statement, but something is created + // in that directory during the dockerfile processing. What this means + // on Windows for TP4 is that in that scenario, the contents will not be + // copied, but that's (somewhat) OK as HCS will bomb out soon after + // as it doesn't support mapped directories which have contents in the + // destination path anyway. + // + // Example for repro later: + // FROM windowsservercore + // RUN mkdir c:\myvol + // RUN copy c:\windows\system32\ntdll.dll c:\myvol + // VOLUME "c:\myvol" + // + // Then + // docker build -t vol . + // docker run -it --rm vol cmd <-- This is where HCS will error out. + // + // // never attempt to copy existing content in a container FS to a shared volume + // if v.DriverName() == volume.DefaultDriverName { + // if err := container.copyImagePathContent(v, mp.Destination); err != nil { + // return err + // } + // } + + // Add it to container.MountPoints + container.addMountPointWithVolume(mp.Destination, v, mp.RW) + } return nil } diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go index 81272c84..6336faf5 100644 --- a/vendor/github.com/docker/docker/daemon/daemon.go +++ b/vendor/github.com/docker/docker/daemon/daemon.go @@ -1,3 +1,8 @@ +// Package daemon exposes the functions that occur on the host server +// on which the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior.
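// Illustrative sketch, not part of the vendored patch: the "method-specific
// struct" pattern the package comment above describes, using
// ContainerCreateConfig from create.go. The helper name, the daemon value d,
// the container name, and the image name are assumptions for the example;
// imports of github.com/docker/docker/daemon and
// github.com/docker/docker/runconfig are assumed.
func createExample(d *daemon.Daemon) (string, error) {
	resp, err := d.ContainerCreate(&daemon.ContainerCreateConfig{
		Name:   "web",
		Config: &runconfig.Config{Image: "busybox"},
		// HostConfig may be left nil; create() substitutes an empty one.
	})
	if err != nil {
		return "", err
	}
	return resp.ID, nil
}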
package daemon import ( @@ -7,7 +12,6 @@ import ( "io/ioutil" "os" "path/filepath" - "regexp" "runtime" "strings" "sync" @@ -15,36 +19,52 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/daemon/events" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/docker/daemon/graphdriver" - _ "github.com/docker/docker/daemon/graphdriver/vfs" + _ "github.com/docker/docker/daemon/graphdriver/vfs" // register vfs "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/network" + derr "github.com/docker/docker/errors" "github.com/docker/docker/graph" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/discovery" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" - "github.com/docker/docker/trust" + "github.com/docker/docker/utils" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" "github.com/docker/libnetwork" - "github.com/opencontainers/runc/libcontainer/netlink" + lntypes "github.com/docker/libnetwork/types" + "github.com/opencontainers/runc/libcontainer" ) var ( - validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` - validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) + validContainerNameChars = utils.RestrictedNameChars + validContainerNamePattern = utils.RestrictedNamePattern - ErrSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") + errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") ) type contStore struct { @@ -78,10 +98,11 @@ func (c *contStore) List() []*Container { containers.Add(cont) } c.Unlock() - containers.Sort() + containers.sort() return *containers } +// Daemon holds information about the Docker daemon. 
type Daemon struct { ID string repository string @@ -91,9 +112,8 @@ type Daemon struct { graph *graph.Graph repositories *graph.TagStore idIndex *truncindex.TruncIndex - sysInfo *sysinfo.SysInfo - config *Config - containerGraph *graphdb.Database + configStore *Config + containerGraphDB *graphdb.Database driver graphdriver.Driver execDriver execdriver.Driver statsCollector *statsCollector @@ -101,7 +121,12 @@ type Daemon struct { RegistryService *registry.Service EventsService *events.Events netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discovery.Watcher root string + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap } // Get looks for a container using the provided information, which could be @@ -123,11 +148,15 @@ func (daemon *Daemon) Get(prefixOrName string) (*Container, error) { return containerByName, nil } - containerId, indexError := daemon.idIndex.Get(prefixOrName) + containerID, indexError := daemon.idIndex.Get(prefixOrName) if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + return nil, derr.ErrorCodeNoSuchContainer.WithArgs(prefixOrName) + } return nil, indexError } - return daemon.containers.Get(containerId), nil + return daemon.containers.Get(containerID), nil } // Exists returns true if a container of the specified ID or name exists, @@ -137,6 +166,12 @@ func (daemon *Daemon) Exists(id string) bool { return c != nil } +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.Get(id) + return c.State.isPaused() +} + func (daemon *Daemon) containerRoot(id string) string { return filepath.Join(daemon.repository, id) } @@ -146,26 +181,20 @@ func (daemon *Daemon) containerRoot(id string) string { func (daemon *Daemon) load(id string) (*Container, error) { container := daemon.newBaseContainer(id) - if err := container.FromDisk(); err != nil { + if err := container.fromDisk(); err != nil { return nil, err } if container.ID != id { - return &container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) } - return &container, nil + return container, nil } // Register makes a container object usable by the daemon as <container.Name> -// This is a wrapper for register func (daemon *Daemon) Register(container *Container) error { - return daemon.register(container, true) -} - -// register makes a container object usable by the daemon as <container.Name> -func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error { - if container.daemon != nil || daemon.Exists(container.ID) { + if daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } if err := validateID(container.ID); err != nil { @@ -175,11 +204,9 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err return err } - container.daemon = daemon - // Attach to stdout and stderr - container.stderr = broadcastwriter.New() - container.stdout = broadcastwriter.New() + container.stderr = new(broadcaster.Unbuffered) + container.stdout = new(broadcaster.Unbuffered) // Attach to stdin if container.Config.OpenStdin { container.stdin, container.stdinPipe = io.Pipe() @@ -193,33 +220,36 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err // we'll waste time if we update it for every container daemon.idIndex.Add(container.ID) + if container.IsRunning() { +
logrus.Debugf("killing old running container %s", container.ID) + // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit + container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137}) + // use the current driver and ensure that the container is dead x.x + cmd := &execdriver.Command{ + CommonCommand: execdriver.CommonCommand{ + ID: container.ID, + }, + } + daemon.execDriver.Terminate(cmd) + + container.unmountIpcMounts(mount.Unmount) + + if err := daemon.Unmount(container); err != nil { + logrus.Debugf("unmount error %s", err) + } + if err := container.toDiskLocking(); err != nil { + logrus.Errorf("Error saving stopped state to disk: %v", err) + } + } + if err := daemon.verifyVolumesInfo(container); err != nil { return err } - if err := container.prepareMountPoints(); err != nil { + if err := daemon.prepareMountPoints(container); err != nil { return err } - if container.IsRunning() { - logrus.Debugf("killing old running container %s", container.ID) - // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit - container.SetStopped(&execdriver.ExitStatus{ExitCode: 137}) - - // use the current driver and ensure that the container is dead x.x - cmd := &execdriver.Command{ - ID: container.ID, - } - daemon.execDriver.Terminate(cmd) - - if err := container.Unmount(); err != nil { - logrus.Debugf("unmount error %s", err) - } - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving stopped state to disk: %v", err) - } - } - return nil } @@ -231,7 +261,7 @@ func (daemon *Daemon) ensureName(container *Container) error { } container.Name = name - if err := container.ToDisk(); err != nil { + if err := container.toDiskLocking(); err != nil { logrus.Errorf("Error saving container name to disk: %v", err) } } @@ -245,7 +275,7 @@ func (daemon *Daemon) restore() error { } var ( - debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") + debug = os.Getenv("DEBUG") != "" currentDriver = daemon.driver.String() containers = make(map[string]*cr) ) @@ -279,7 +309,7 @@ func (daemon *Daemon) restore() error { } } - if entities := daemon.containerGraph.List("/", -1); entities != nil { + if entities := daemon.containerGraphDB.List("/", -1); entities != nil { for _, p := range entities.Paths() { if !debug && logrus.GetLevel() == logrus.InfoLevel { fmt.Print(".") @@ -308,17 +338,19 @@ func (daemon *Daemon) restore() error { } } - if err := daemon.register(container, false); err != nil { - logrus.Debugf("Failed to register container %s: %s", container.ID, err) + if err := daemon.Register(container); err != nil { + logrus.Errorf("Failed to register container %s: %s", container.ID, err) + // The container register failed should not be started. + return } // check the restart policy on the containers and restart any container with // the restart policy of "always" - if daemon.config.AutoRestart && container.shouldRestart() { + if daemon.configStore.AutoRestart && container.shouldRestart() { logrus.Debugf("Starting container %s", container.ID) - if err := container.Start(); err != nil { - logrus.Debugf("Failed to start container %s: %s", container.ID, err) + if err := daemon.containerStart(container); err != nil { + logrus.Errorf("Failed to start container %s: %s", container.ID, err) } } }(c.container, c.registered) @@ -347,7 +379,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image. 
return nil } -func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { var ( err error id = stringid.GenerateNonCryptoID() @@ -376,27 +408,19 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) { name = "/" + name } - if _, err := daemon.containerGraph.Set(name, id); err != nil { + if _, err := daemon.containerGraphDB.Set(name, id); err != nil { if !graphdb.IsNonUniqueNameError(err) { return "", err } conflictingContainer, err := daemon.GetByName(name) if err != nil { - if strings.Contains(err.Error(), "Could not find entity") { - return "", err - } - - // Remove name and continue starting the container - if err := daemon.containerGraph.Delete(name); err != nil { - return "", err - } - } else { - nameAsKnownByUser := strings.TrimPrefix(name, "/") - return "", fmt.Errorf( - "Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", nameAsKnownByUser, - stringid.TruncateID(conflictingContainer.ID)) + return "", err } + return "", fmt.Errorf( + "Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", strings.TrimPrefix(name, "/"), + stringid.TruncateID(conflictingContainer.ID)) + } return name, nil } @@ -409,7 +433,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) { name = "/" + name } - if _, err := daemon.containerGraph.Set(name, id); err != nil { + if _, err := daemon.containerGraphDB.Set(name, id); err != nil { if !graphdb.IsNonUniqueNameError(err) { return "", err } @@ -419,7 +443,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) { } name = "/" + stringid.TruncateID(id) - if _, err := daemon.containerGraph.Set(name, id); err != nil { + if _, err := daemon.containerGraphDB.Set(name, id); err != nil { return "", err } return name, nil @@ -427,36 +451,27 @@ func (daemon *Daemon) generateNewName(id string) (string, error) { func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) { // Generate default hostname - // FIXME: the lxc template no longer needs to set a default hostname if config.Hostname == "" { config.Hostname = id[:12] } } -func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *runconfig.Entrypoint, configCmd *runconfig.Command) (string, []string) { - var ( - entrypoint string - args []string - ) - +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlice, configCmd *stringutils.StrSlice) (string, []string) { cmdSlice := configCmd.Slice() if configEntrypoint.Len() != 0 { eSlice := configEntrypoint.Slice() - entrypoint = eSlice[0] - args = append(eSlice[1:], cmdSlice...) - } else { - entrypoint = cmdSlice[0] - args = cmdSlice[1:] + return eSlice[0], append(eSlice[1:], cmdSlice...) 
} - return entrypoint, args + return cmdSlice[0], cmdSlice[1:] } func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) { var ( - id string - err error + id string + err error + noExplicitName = name == "" ) - id, name, err = daemon.generateIdAndName(name) + id, name, err = daemon.generateIDAndName(name) if err != nil { return nil, err } @@ -471,14 +486,16 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID base.Config = config base.hostConfig = &runconfig.HostConfig{} base.ImageID = imgID - base.NetworkSettings = &network.Settings{} + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name base.Driver = daemon.driver.String() - base.ExecDriver = daemon.execDriver.Name() - return &base, err + return base, err } +// GetFullContainerName returns the fully qualified container name: +// container state is stored on disk under this name, so this is +// effectively constructing that file name. func GetFullContainerName(name string) (string, error) { if name == "" { return "", fmt.Errorf("Container name cannot be empty") } @@ -489,12 +506,13 @@ func GetFullContainerName(name string) (string, error) { return name, nil } +// GetByName returns a container given a name. func (daemon *Daemon) GetByName(name string) (*Container, error) { fullName, err := GetFullContainerName(name) if err != nil { return nil, err } - entity := daemon.containerGraph.Get(fullName) + entity := daemon.containerGraphDB.Get(fullName) if entity == nil { return nil, fmt.Errorf("Could not find entity for %s", name) } @@ -505,14 +523,52 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) { return e, nil } -func (daemon *Daemon) Children(name string) (map[string]*Container, error) { +// GetEventFilter returns a filters.Filter for a set of filters +func (daemon *Daemon) GetEventFilter(filter filters.Args) *events.Filter { + // incoming container filter can be name, id or partial id, convert to + // a full container id + for i, cn := range filter["container"] { + c, err := daemon.Get(cn) + if err != nil { + filter["container"][i] = "" + } else { + filter["container"][i] = c.ID + } + } + return events.NewFilter(filter, daemon.GetLabels) +} + +// SubscribeToEvents returns the current record of events, a channel to stream new events from, and a function to cancel the stream of events. +func (daemon *Daemon) SubscribeToEvents() ([]*jsonmessage.JSONMessage, chan interface{}, func()) { + return daemon.EventsService.Subscribe() +} + +// GetLabels for a container or image id +func (daemon *Daemon) GetLabels(id string) map[string]string { + // TODO: TestCase + container := daemon.containers.Get(id) + if container != nil { + return container.Config.Labels + } + + img, err := daemon.repositories.LookupImage(id) + if err == nil { + return img.ContainerConfig.Labels + } + return nil +} + +// children returns all child containers of the container with the +// given name. The containers are returned as a map from the container +// name to a pointer to Container.
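// Illustrative sketch, not part of the vendored patch: consuming the map
// returned by children below. Because children is unexported this would have
// to live in package daemon; the method name and the link name "/web" are
// assumptions for the example.
func (daemon *Daemon) logChildren() error {
	kids, err := daemon.children("/web")
	if err != nil {
		return err
	}
	for name, child := range kids {
		logrus.Debugf("child %s -> container %s", name, child.ID)
	}
	return nil
}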
+func (daemon *Daemon) children(name string) (map[string]*Container, error) { name, err := GetFullContainerName(name) if err != nil { return nil, err } children := make(map[string]*Container) - err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { + err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error { c, err := daemon.Get(e.ID()) if err != nil { return err @@ -527,24 +583,28 @@ func (daemon *Daemon) Children(name string) (map[string]*Container, error) { return children, nil } -func (daemon *Daemon) Parents(name string) ([]string, error) { +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(name string) ([]string, error) { name, err := GetFullContainerName(name) if err != nil { return nil, err } - return daemon.containerGraph.Parents(name) + return daemon.containerGraphDB.Parents(name) } -func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error { +func (daemon *Daemon) registerLink(parent, child *Container, alias string) error { fullName := filepath.Join(parent.Name, alias) - if !daemon.containerGraph.Exists(fullName) { - _, err := daemon.containerGraph.Set(fullName, child.ID) + if !daemon.containerGraphDB.Exists(fullName) { + _, err := daemon.containerGraphDB.Set(fullName, child.ID) return err } return nil } +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) { setDefaultMtu(config) @@ -557,8 +617,8 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo config.DisableBridge = isBridgeNetworkDisabled(config) // Verify the platform is supported as a daemon - if runtime.GOOS != "linux" && runtime.GOOS != "windows" { - return nil, ErrSystemNotSupported + if !platformSupported { + return nil, errSystemNotSupported } // Validate platform-specific requirements @@ -570,6 +630,15 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo // on Windows to dump Go routine stacks setupDumpStackTrap() + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // get the canonical path to the Docker root directory var realRoot string if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { @@ -580,14 +649,13 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) } } - config.Root = realRoot - // Create the root directory if it doesn't exists - if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { + + if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { return nil, err } // set up the tmpDir to use a canonical path - tmp, err := tempDir(config.Root) + tmp, err := tempDir(config.Root, rootUID, rootGID) if err != nil { return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) } @@ -601,7 +669,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo graphdriver.DefaultDriver = config.GraphDriver // Load storage driver - driver, err := graphdriver.New(config.Root, config.GraphOptions) + driver, err := graphdriver.New(config.Root, config.GraphOptions, uidMaps, gidMaps) if err != nil { return 
nil, fmt.Errorf("error initializing graphdriver: %v", err) } @@ -634,7 +702,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo daemonRepo := filepath.Join(config.Root, "containers") - if err := system.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err } @@ -644,13 +712,14 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo } logrus.Debug("Creating images graph") - g, err := graph.NewGraph(filepath.Join(config.Root, "graph"), d.driver) + g, err := graph.NewGraph(filepath.Join(config.Root, "graph"), d.driver, uidMaps, gidMaps) if err != nil { return nil, err } // Configure the volumes driver - if err := configureVolumes(config); err != nil { + volStore, err := configureVolumes(config, rootUID, rootGID) + if err != nil { return nil, err } @@ -661,13 +730,9 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo trustDir := filepath.Join(config.Root, "trust") - if err := system.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(trustDir, 0700); err != nil { return nil, err } - trustService, err := trust.NewTrustStore(trustDir) - if err != nil { - return nil, fmt.Errorf("could not create trust store: %s", err) - } eventsService := events.New() logrus.Debug("Creating repository list") @@ -676,14 +741,36 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo Key: trustKey, Registry: registryService, Events: eventsService, - Trust: trustService, } repositories, err := graph.NewTagStore(filepath.Join(config.Root, "repositories-"+d.driver.String()), tagCfg) if err != nil { return nil, fmt.Errorf("Couldn't create Tag store repositories-%s: %s", d.driver.String(), err) } - d.netController, err = initNetworkController(config) + if restorer, ok := d.driver.(graphdriver.ImageRestorer); ok { + if _, err := restorer.RestoreCustomImages(repositories, g); err != nil { + return nil, fmt.Errorf("Couldn't restore custom images: %s", err) + } + } + + // Discovery is only enabled when the daemon is launched with an address to advertise. When + // initialized, the daemon is registered and we can store the discovery backend as its read-only + // DiscoveryWatcher version. + if config.ClusterStore != "" && config.ClusterAdvertise != "" { + advertise, err := discovery.ParseAdvertise(config.ClusterStore, config.ClusterAdvertise) + if err != nil { + return nil, fmt.Errorf("discovery advertise parsing failed (%v)", err) + } + config.ClusterAdvertise = advertise + d.discoveryWatcher, err = initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) + if err != nil { + return nil, fmt.Errorf("discovery initialization failed (%v)", err) + } + } else if config.ClusterAdvertise != "" { + return nil, fmt.Errorf("invalid cluster configuration. 
--cluster-advertise must be accompanied by --cluster-store configuration") + } + + d.netController, err = d.initNetworkController(config) if err != nil { return nil, fmt.Errorf("Error initializing network controller: %v", err) } @@ -694,16 +781,9 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo return nil, err } - d.containerGraph = graph + d.containerGraphDB = graph var sysInitPath string - if config.ExecDriver == "lxc" { - initPath, err := configureSysInit(config) - if err != nil { - return nil, err - } - sysInitPath = initPath - } sysInfo := sysinfo.New(false) // Check if Devices cgroup is mounted, it is hard requirement for container security, @@ -712,7 +792,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo return nil, fmt.Errorf("Devices cgroup isn't mounted") } - ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, config.ExecRoot, config.Root, sysInitPath, sysInfo) + ed, err := execdrivers.NewDriver(config.ExecOptions, config.ExecRoot, config.Root, sysInitPath, sysInfo) if err != nil { return nil, err } @@ -724,15 +804,22 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo d.graph = g d.repositories = repositories d.idIndex = truncindex.NewTruncIndex([]string{}) - d.sysInfo = sysInfo - d.config = config + d.configStore = config d.sysInitPath = sysInitPath d.execDriver = ed - d.statsCollector = newStatsCollector(1 * time.Second) + d.statsCollector = d.newStatsCollector(1 * time.Second) d.defaultLogConfig = config.LogConfig d.RegistryService = registryService d.EventsService = eventsService + d.volumes = volStore d.root = config.Root + d.uidMaps = uidMaps + d.gidMaps = gidMaps + + if err := d.cleanupMounts(); err != nil { + return nil, err + } + go d.execCommandGC() if err := d.restore(); err != nil { @@ -742,37 +829,76 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo return d, nil } +func (daemon *Daemon) shutdownContainer(c *Container) error { + // TODO(windows): Handle docker restart with paused containers + if c.isPaused() { + // To terminate a process in the freezer cgroup, we should send + // SIGTERM to this process then unfreeze it, and the process will + // be forced to terminate immediately. + logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) + sig, ok := signal.SignalMap["TERM"] + if !ok { + return fmt.Errorf("System does not support SIGTERM") + } + if err := daemon.kill(c, int(sig)); err != nil { + return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) + } + if err := daemon.containerUnpause(c); err != nil { + return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) + } + if _, err := c.WaitStop(10 * time.Second); err != nil { + logrus.Debugf("container %s failed to exit in 10 seconds of SIGTERM, sending SIGKILL to force", c.ID) + sig, ok := signal.SignalMap["KILL"] + if !ok { + return fmt.Errorf("System does not support SIGKILL") + } + if err := daemon.kill(c, int(sig)); err != nil { + logrus.Errorf("Failed to SIGKILL container %s", c.ID) + } + c.WaitStop(-1 * time.Second) + return err + } + } + // If the container failed to exit in 10 seconds of SIGTERM, use the force + if err := daemon.containerStop(c, 10); err != nil { + return fmt.Errorf("Stop container %s with error: %v", c.ID, err) + } + + c.WaitStop(-1 * time.Second) + return nil +} + +// Shutdown stops the daemon.
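// Illustrative sketch, not part of the vendored patch: the freezer-cgroup
// termination order used by shutdownContainer above. A signal queued while
// the cgroup is frozen is only delivered once its tasks are thawed, so the
// SIGTERM must be sent before the unpause. The helper name is hypothetical.
func (daemon *Daemon) stopPaused(c *Container) error {
	// 1. Queue SIGTERM while the container is still frozen.
	if err := daemon.kill(c, int(signal.SignalMap["TERM"])); err != nil {
		return err
	}
	// 2. Thaw the cgroup; the pending SIGTERM is delivered immediately.
	return daemon.containerUnpause(c)
}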
func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true if daemon.containers != nil { group := sync.WaitGroup{} logrus.Debug("starting clean shutdown of all containers...") for _, container := range daemon.List() { - c := container - if c.IsRunning() { - logrus.Debugf("stopping %s", c.ID) - group.Add(1) - - go func() { - defer group.Done() - // If container failed to exit in 10 seconds of SIGTERM, then using the force - if err := c.Stop(10); err != nil { - logrus.Errorf("Stop container %s with error: %v", c.ID, err) - } - c.WaitStop(-1 * time.Second) - logrus.Debugf("container stopped %s", c.ID) - }() + if !container.IsRunning() { + continue } + logrus.Debugf("stopping %s", container.ID) + group.Add(1) + go func(c *Container) { + defer group.Done() + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + logrus.Debugf("container stopped %s", c.ID) + }(container) } group.Wait() - - // trigger libnetwork GC only if it's initialized - if daemon.netController != nil { - daemon.netController.GC() - } } - if daemon.containerGraph != nil { - if err := daemon.containerGraph.Close(); err != nil { + // trigger libnetwork Stop only if it's initialized + if daemon.netController != nil { + daemon.netController.Stop() + } + + if daemon.containerGraphDB != nil { + if err := daemon.containerGraphDB.Close(); err != nil { logrus.Errorf("Error during container graph.Close(): %v", err) } } @@ -783,11 +909,17 @@ func (daemon *Daemon) Shutdown() error { } } + if err := daemon.cleanupMounts(); err != nil { + return err + } + return nil } +// Mount mounts the container's filesystem through the graph driver and +// records the returned path in container.basefs. func (daemon *Daemon) Mount(container *Container) error { - dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) + dir, err := daemon.driver.Get(container.ID, container.getMountLabel()) if err != nil { return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) } @@ -806,78 +938,217 @@ func (daemon *Daemon) Mount(container *Container) error { return nil } +// Unmount unsets the container base filesystem func (daemon *Daemon) Unmount(container *Container) error { - daemon.driver.Put(container.ID) - return nil + return daemon.driver.Put(container.ID) } -func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { - return daemon.execDriver.Run(c.command, pipes, startCallback) +// Run uses the execution driver to run a given container +func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) { + hooks := execdriver.Hooks{ + Start: startCallback, + } + hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error { + return daemon.setNetworkNamespaceKey(c.ID, pid) + }) + return daemon.execDriver.Run(c.command, pipes, hooks) } -func (daemon *Daemon) Kill(c *Container, sig int) error { +func (daemon *Daemon) kill(c *Container, sig int) error { return daemon.execDriver.Kill(c.command, sig) } -func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) { +func (daemon *Daemon) stats(c *Container) (*execdriver.ResourceStats, error) { return daemon.execDriver.Stats(c.ID) } -func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) { - c, err := daemon.Get(name) - if err != nil { - return nil, err - } - ch =
daemon.statsCollector.collect(c) - return ch, nil +func (daemon *Daemon) subscribeToContainerStats(c *Container) chan interface{} { + return daemon.statsCollector.collect(c) } -func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error { - c, err := daemon.Get(name) +func (daemon *Daemon) unsubscribeToContainerStats(c *Container, ch chan interface{}) { + daemon.statsCollector.unsubscribe(c, ch) +} + +func (daemon *Daemon) changes(container *Container) ([]archive.Change, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Changes(container.ID, initID) +} + +func (daemon *Daemon) diff(container *Container) (archive.Archive, error) { + initID := fmt.Sprintf("%s-init", container.ID) + return daemon.driver.Diff(container.ID, initID) +} + +func (daemon *Daemon) createRootfs(container *Container) error { + // Step 1: create the container directory. + // This doubles as a barrier to avoid race conditions. + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) if err != nil { return err } - daemon.statsCollector.unsubscribe(c, ch) + if err := idtools.MkdirAs(container.root, 0700, rootUID, rootGID); err != nil { + return err + } + initID := fmt.Sprintf("%s-init", container.ID) + if err := daemon.driver.Create(initID, container.ImageID); err != nil { + return err + } + initPath, err := daemon.driver.Get(initID, "") + if err != nil { + return err + } + + if err := setupInitLayer(initPath, rootUID, rootGID); err != nil { + if err := daemon.driver.Put(initID); err != nil { + logrus.Errorf("Failed to Put init layer: %v", err) + } + return err + } + + // We want to unmount the init layer before we take a snapshot of it + // for the actual container. + if err := daemon.driver.Put(initID); err != nil { + return err + } + + if err := daemon.driver.Create(container.ID, initID); err != nil { + return err + } return nil } -// FIXME: this is a convenience function for integration tests -// which need direct access to daemon.graph. -// Once the tests switch to using engine and jobs, this method -// can go away. +// Graph returns the *graph.Graph which can be used for layer graph operations. func (daemon *Daemon) Graph() *graph.Graph { return daemon.graph } -func (daemon *Daemon) Repositories() *graph.TagStore { - return daemon.repositories +// TagImage creates a tag in the repository repoName, pointing to the image named +// imageName. If force is true, an existing tag with the same name may be +// overwritten. +func (daemon *Daemon) TagImage(repoName, tag, imageName string, force bool) error { + if err := daemon.repositories.Tag(repoName, tag, imageName, force); err != nil { + return err + } + daemon.EventsService.Log("tag", utils.ImageReference(repoName, tag), "") + return nil } -func (daemon *Daemon) Config() *Config { - return daemon.config +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(image string, tag string, imagePullConfig *graph.ImagePullConfig) error { + return daemon.repositories.Pull(image, tag, imagePullConfig) } -func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo { - return daemon.sysInfo +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively.
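// Illustrative sketch, not part of the vendored patch: driving the image API
// documented above. The helper name, repository, and tag names are
// assumptions for the example; pullCfg would carry the auth config and the
// progress output stream.
func pullAndTag(d *daemon.Daemon, pullCfg *graph.ImagePullConfig) error {
	if err := d.PullImage("busybox", "latest", pullCfg); err != nil {
		return err
	}
	// force=false: refuse to overwrite an existing tag.
	return d.TagImage("example/busybox", "v1", "busybox:latest", false)
}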
+func (daemon *Daemon) ImportImage(src, repo, tag, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error { + return daemon.repositories.Import(src, repo, tag, msg, inConfig, outStream, containerConfig) } -func (daemon *Daemon) SystemInitPath() string { +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + return daemon.repositories.ImageExport(names, outStream) +} + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(localName string, imagePushConfig *graph.ImagePushConfig) error { + return daemon.repositories.Push(localName, imagePushConfig) +} + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. +func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + return daemon.repositories.Lookup(name) +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer) error { + return daemon.repositories.Load(inTar, outStream) +} + +// ListImages returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by pkg/parsers/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. +func (daemon *Daemon) ListImages(filterArgs, filter string, all bool) ([]*types.Image, error) { + return daemon.repositories.Images(filterArgs, filter, all) +} + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + return daemon.repositories.History(name) +} + +// GetImage returns pointer to an Image struct corresponding to the given +// name. The name can include an optional tag; otherwise the default tag will +// be used. +func (daemon *Daemon) GetImage(name string) (*image.Image, error) { + return daemon.repositories.LookupImage(name) +} + +func (daemon *Daemon) config() *Config { + return daemon.configStore +} + +func (daemon *Daemon) systemInitPath() string { return daemon.sysInitPath } +// GraphDriver returns the currently used driver for processing +// container layers. func (daemon *Daemon) GraphDriver() graphdriver.Driver { return daemon.driver } +// ExecutionDriver returns the currently used driver for creating and +// starting execs in a container. func (daemon *Daemon) ExecutionDriver() execdriver.Driver { return daemon.execDriver } -func (daemon *Daemon) ContainerGraph() *graphdb.Database { - return daemon.containerGraph +func (daemon *Daemon) containerGraph() *graphdb.Database { + return daemon.containerGraphDB } +// GetUIDGIDMaps returns the current daemon's user namespace settings +// for the full uid and gid maps which will be applied to containers +// started in this instance. 
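// Illustrative sketch, not part of the vendored patch: how one idtools.IDMap
// entry is interpreted. With {ContainerID: 0, HostID: 100000, Size: 65536},
// container uid 0 runs as host uid 100000 and container uid 1000 as 101000.
// toHostID is a hypothetical helper, not part of the idtools API.
func toHostID(maps []idtools.IDMap, containerID int) (int, bool) {
	for _, m := range maps {
		if containerID >= m.ContainerID && containerID < m.ContainerID+m.Size {
			return m.HostID + (containerID - m.ContainerID), true
		}
	}
	return -1, false
}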
+func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { + return daemon.uidMaps, daemon.gidMaps +} + +// GetRemappedUIDGID returns the current daemon's uid and gid values +// if user namespaces are in use for this daemon instance. If not, +// this function will return the "real" root values of 0, 0. +func (daemon *Daemon) GetRemappedUIDGID() (int, int) { + uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + return uid, gid +} + +// ImageGetCached returns the earliest created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { + // for now just exit if imgID has no children. + // maybe parentRefs in graph could be used to store + // the Image obj children for faster lookup below but this can + // be quite memory hungry. + if !daemon.Graph().HasChildren(imgID) { + return nil, nil + } + // Retrieve all images images := daemon.Graph().Map() @@ -907,12 +1178,12 @@ func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*i } // tempDir returns the default directory to use for temporary files. -func tempDir(rootDir string) (string, error) { +func tempDir(rootDir string, rootUID, rootGID int) (string, error) { var tmpDir string if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { tmpDir = filepath.Join(rootDir, "tmp") } - return tmpDir, system.MkdirAll(tmpDir, 0700) + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) } func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { @@ -932,7 +1203,7 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig. container.Lock() defer container.Unlock() // Register any links from the host config before starting the container - if err := daemon.RegisterLinks(container, hostConfig); err != nil { + if err := daemon.registerLinks(container, hostConfig); err != nil { return err } @@ -954,16 +1225,126 @@ func setDefaultMtu(config *Config) { var errNoDefaultRoute = errors.New("no default route was found") -// getDefaultRouteMtu returns the MTU for the default route's interface. -func getDefaultRouteMtu() (int, error) { - routes, err := netlink.NetworkGetRoutes() - if err != nil { - return 0, err - } - for _, r := range routes { - if r.Default { - return r.Iface.MTU, nil +// verifyContainerSettings performs validation of the hostconfig and config +// structures. +func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("The working directory '%s' is invalid.
It needs to be an absolute path.", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } } } - return 0, errNoDefaultRoute + + if hostConfig == nil { + return nil, nil + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort) + } + } + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config) +} + +func configureVolumes(config *Config, rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(config.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + s := store.New() + s.AddAll(volumesDriver.List()) + + return s, nil +} + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(authConfig *cliconfig.AuthConfig) (string, error) { + return daemon.RegistryService.Auth(authConfig) +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. +func (daemon *Daemon) SearchRegistryForImages(term string, + authConfig *cliconfig.AuthConfig, + headers map[string][]string) (*registry.SearchResults, error) { + return daemon.RegistryService.Search(term, authConfig, headers) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *Container) (*execdriver.ResourceStats, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + // Retrieve the nw statistics from libnetwork and inject them in the Stats + var nwStats []*libcontainer.NetworkInterface + if nwStats, err = daemon.getNetworkStats(container); err != nil { + return nil, err + } + stats.Interfaces = nwStats + + return stats, nil +} + +func (daemon *Daemon) getNetworkStats(c *Container) ([]*libcontainer.NetworkInterface, error) { + var list []*libcontainer.NetworkInterface + + sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) + if err != nil { + return list, err + } + + stats, err := sb.Statistics() + if err != nil { + return list, err + } + + // Convert libnetwork nw stats into libcontainer nw stats + for ifName, ifStats := range stats { + list = append(list, convertLnNetworkStats(ifName, ifStats)) + } + + return list, nil +} + +func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface { + n := &libcontainer.NetworkInterface{Name: name} + n.RxBytes = stats.RxBytes + n.RxPackets = stats.RxPackets + n.RxErrors = stats.RxErrors + n.RxDropped = stats.RxDropped + n.TxBytes = stats.TxBytes + n.TxPackets = stats.TxPackets + n.TxErrors = stats.TxErrors + n.TxDropped = stats.TxDropped + return n } diff --git a/vendor/github.com/docker/docker/daemon/daemon_btrfs.go b/vendor/github.com/docker/docker/daemon/daemon_btrfs.go index 61dac0dd..b15b37b9 100644 --- 
diff --git a/vendor/github.com/docker/docker/daemon/daemon_btrfs.go b/vendor/github.com/docker/docker/daemon/daemon_btrfs.go index 61dac0dd..b15b37b9 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_btrfs.go +++ b/vendor/github.com/docker/docker/daemon/daemon_btrfs.go @@ -3,5 +3,6 @@ package daemon import ( + // register the btrfs graphdriver _ "github.com/docker/docker/daemon/graphdriver/btrfs" ) diff --git a/vendor/github.com/docker/docker/daemon/daemon_devicemapper.go b/vendor/github.com/docker/docker/daemon/daemon_devicemapper.go index 5b64c453..b33b01a5 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_devicemapper.go +++ b/vendor/github.com/docker/docker/daemon/daemon_devicemapper.go @@ -3,5 +3,6 @@ package daemon import ( + // register the devmapper graphdriver _ "github.com/docker/docker/daemon/graphdriver/devmapper" ) diff --git a/vendor/github.com/docker/docker/daemon/daemon_experimental.go b/vendor/github.com/docker/docker/daemon/daemon_experimental.go new file mode 100644 index 00000000..58ef6e72 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_experimental.go @@ -0,0 +1,107 @@ +// +build experimental + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/runconfig" +) + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot set up user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op + // effectively + logrus.Warnf("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +}
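To make the remapped-root setup above concrete, the sketch below parses a single /etc/subuid entry into a root mapping. It is only an approximation of what idtools.CreateIDMappings derives; the "dockremap" entry and the idMap type are illustrative, not the vendored API:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// idMap mirrors the shape of idtools.IDMap (container ID -> host ID range).
type idMap struct {
	ContainerID int
	HostID      int
	Size        int
}

// parseSubuidLine converts one /etc/subuid entry ("user:100000:65536")
// into a root mapping, roughly what idtools.CreateIDMappings derives.
func parseSubuidLine(line string) (idMap, error) {
	parts := strings.Split(strings.TrimSpace(line), ":")
	if len(parts) != 3 {
		return idMap{}, fmt.Errorf("malformed subuid entry: %q", line)
	}
	start, err := strconv.Atoi(parts[1])
	if err != nil {
		return idMap{}, err
	}
	size, err := strconv.Atoi(parts[2])
	if err != nil {
		return idMap{}, err
	}
	// Container root (uid 0) maps to the start of the host range.
	return idMap{ContainerID: 0, HostID: start, Size: size}, nil
}

func main() {
	m, err := parseSubuidLine("dockremap:100000:65536")
	if err != nil {
		panic(err)
	}
	fmt.Printf("container uid 0..%d -> host uid %d..%d\n",
		m.Size-1, m.HostID, m.HostID+m.Size-1)
}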
+ +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + // the main docker root needs to be accessible by all users, as user namespace support + // will create subdirectories owned by either a) the real system root (when no remapping + // is set up) or b) the remapped root host ID (when --root=uid:gid is used) + // for "first time" users of user namespaces, we need to migrate the current directory + // contents to the "0.0" (root == root "namespace" daemon root) + nsRoot := "0.0" + if _, err := os.Stat(rootDir); err == nil { + // root already exists; we need to check for a prior migration + if _, err := os.Stat(filepath.Join(rootDir, nsRoot)); err != nil && os.IsNotExist(err) { + // need to migrate current root to "0.0" subroot + // 1. create non-usernamespaced root as "0.0" + if err := os.Mkdir(filepath.Join(rootDir, nsRoot), 0700); err != nil { + return fmt.Errorf("Cannot create daemon root %q: %v", filepath.Join(rootDir, nsRoot), err) + } + // 2. move current root content to "0.0" new subroot + if err := directory.MoveToSubdir(rootDir, nsRoot); err != nil { + return fmt.Errorf("Cannot migrate current daemon root %q for user namespaces: %v", rootDir, err) + } + // 3. chmod outer root to 755 + if chmodErr := os.Chmod(rootDir, 0755); chmodErr != nil { + return chmodErr + } + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0755 with root:root ownership + if err := os.MkdirAll(rootDir, 0755); err != nil { + return err + } + // create the "0.0" subroot (so no future "migration" happens of the root) + if err := os.Mkdir(filepath.Join(rootDir, nsRoot), 0700); err != nil { + return err + } + } + + // for user namespaces we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + nsRoot = fmt.Sprintf("%d.%d", rootUID, rootGID) + } + config.Root = filepath.Join(rootDir, nsRoot) + logrus.Debugf("Creating actual daemon root: %s", config.Root) + + // Create the root directory if it doesn't exist + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + return nil +} + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { + if hostConfig.Privileged && daemon.config().RemappedRoot != "" { + return nil, fmt.Errorf("Privileged mode is incompatible with user namespace mappings") + } + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go b/vendor/github.com/docker/docker/daemon/daemon_linux.go new file mode 100644 index 00000000..cda0e82e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux.go @@ -0,0 +1,60 @@ +package daemon + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +// cleanupMounts unmounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + logrus.Debugf("Cleaning up old shm/mqueue mounts: start.") + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReader(f, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReader(reader io.Reader, unmount func(target string) error) error { + if daemon.repository == "" { + return nil + } + sc := bufio.NewScanner(reader) + var errors []string + for sc.Scan() { + line := sc.Text() + fields := strings.Fields(line) + if strings.HasPrefix(fields[4], daemon.repository) { + logrus.Debugf("Mount base: %v, repository %s", fields[4], daemon.repository) + mnt := fields[4] + mountBase := filepath.Base(mnt) + if mountBase == "mqueue" || mountBase == "shm" { + logrus.Debugf("Unmounting %v", mnt) + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old shm/mqueue mounts: done.") + return nil +}
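The tests that follow feed cleanupMountsFromReader a canned mountinfo fixture. As a standalone illustration of the field layout it depends on (the fifth whitespace-separated field is the mount point), assuming a well-formed /proc/self/mountinfo line:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// A mountinfo line; fields[4] is the mount point.
	line := "235 232 0:55 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw"
	fields := strings.Fields(line)
	mnt := fields[4]
	base := filepath.Base(mnt)
	fmt.Printf("mount point %s (base %s), shm/mqueue candidate: %v\n",
		mnt, base, base == "shm" || base == "mqueue")
}

diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux_test.go b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go new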
file mode 100644 index 00000000..0439d0bc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go @@ -0,0 +1,74 @@ +// +build linux + +package daemon + +import ( + "strings" + "testing" +) + +func TestCleanupMounts(t *testing.T) { + fixture := `230 138 0:60 / / rw,relatime - overlay overlay rw,lowerdir=/var/lib/docker/overlay/0ef9f93d5d365c1385b09d54bbee6afff3d92002c16f22eccb6e1549b2ff97d8/root,upperdir=/var/lib/docker/overlay/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/upper,workdir=/var/lib/docker/overlay/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/work +231 230 0:56 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +232 230 0:57 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 +233 232 0:58 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +235 232 0:55 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +236 230 0:61 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +237 236 0:62 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw +238 237 0:21 /system.slice/docker.service /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd +239 237 0:23 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event +240 237 0:24 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset,clone_children +241 237 0:25 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices +242 237 0:26 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer +243 237 0:27 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu,cpuacct +244 237 0:28 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio +245 237 0:29 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,net_cls,net_prio +246 237 0:30 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb +247 237 0:31 /docker/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory +248 230 253:1 /var/lib/docker/volumes/510cc41ac68c48bd4eac932e3e09711673876287abf1b185312cfbfe6261a111/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/ba70ea0c-1a8f-4ee4-9687-cb393730e2b5 rw,errors=remount-ro,data=ordered +250 230 253:1 /var/lib/docker/containers/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/ba70ea0c-1a8f-4ee4-9687-cb393730e2b5 rw,errors=remount-ro,data=ordered +251 230 253:1 /var/lib/docker/containers/dfac036ce135a8914e292cb2f6fea114f7339983c186366aa26d0051e93162cb/hosts /etc/hosts rw,relatime - ext4 
/dev/disk/by-uuid/ba70ea0c-1a8f-4ee4-9687-cb393730e2b5 rw,errors=remount-ro,data=ordered +252 232 0:13 /1 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +139 236 0:11 / /sys/kernel/security rw,relatime - securityfs none rw +140 230 0:54 / /tmp rw,relatime - tmpfs none rw +145 230 0:3 / /run/docker/netns/default rw - nsfs nsfs rw +130 140 0:45 / /tmp/docker_recursive_mount_test312125472/tmpfs rw,relatime - tmpfs tmpfs rw +131 230 0:3 / /run/docker/netns/47903e2e6701 rw - nsfs nsfs rw +133 230 0:55 / /go/src/github.com/docker/docker/bundles/1.9.0-dev/test-integration-cli/d45526097/graph/containers/47903e2e67014246eba27607809d5f5c2437c3bf84c2986393448f84093cc40b/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw` + + d := &Daemon{ + repository: "/go/src/github.com/docker/docker/bundles/1.9.0-dev/test-integration-cli/d45526097/graph/containers/", + } + + expected := "/go/src/github.com/docker/docker/bundles/1.9.0-dev/test-integration-cli/d45526097/graph/containers/47903e2e67014246eba27607809d5f5c2437c3bf84c2986393448f84093cc40b/mqueue" + var unmounted bool + unmount := func(target string) error { + if target == expected { + unmounted = true + } + return nil + } + + d.cleanupMountsFromReader(strings.NewReader(fixture), unmount) + + if !unmounted { + t.Fatalf("Expected to unmount the mqueue") + } +} + +func TestNotCleanupMounts(t *testing.T) { + d := &Daemon{ + repository: "", + } + var unmounted bool + unmount := func(target string) error { + unmounted = true + return nil + } + mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` + d.cleanupMountsFromReader(strings.NewReader(mountInfo), unmount) + if unmounted { + t.Fatalf("Expected not to clean up /dev/shm") + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_overlay.go b/vendor/github.com/docker/docker/daemon/daemon_overlay.go index 25a42a19..3c97db13 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_overlay.go +++ b/vendor/github.com/docker/docker/daemon/daemon_overlay.go @@ -3,5 +3,6 @@ package daemon import ( + // register the overlay graphdriver _ "github.com/docker/docker/daemon/graphdriver/overlay" ) diff --git a/vendor/github.com/docker/docker/daemon/daemon_stub.go b/vendor/github.com/docker/docker/daemon/daemon_stub.go new file mode 100644 index 00000000..8fbb0508 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_stub.go @@ -0,0 +1,28 @@ +// +build !experimental + +package daemon + +import ( + "os" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" +) + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // Create the root directory if it doesn't exist + if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_test.go b/vendor/github.com/docker/docker/daemon/daemon_test.go index 1b0c9d90..6dcce346 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_test.go @@ -13,8 +13,9 @@ import ( "github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig" "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" + volumedrivers "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" ) // @@ -86,9 +87,9 @@ func TestGet(t *testing.T) { graph.Set(c5.Name, c5.ID) daemon := &Daemon{ - containers: store, - idIndex: index, - containerGraph: graph, + containers: store, + idIndex: index, + containerGraphDB: graph, } if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { @@ -130,15 +131,15 @@ func TestLoadWithVolume(t *testing.T) { } defer os.RemoveAll(tmp) - containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" - containerPath := filepath.Join(tmp, containerId) + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) if err := os.MkdirAll(containerPath, 0755); err != nil { t.Fatal(err) } - hostVolumeId := stringid.GenerateNonCryptoID() - vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId) - volumePath := filepath.Join(tmp, "volumes", hostVolumeId) + hostVolumeID := stringid.GenerateNonCryptoID() + vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID) + volumePath := filepath.Join(tmp, "volumes", hostVolumeID) if err := os.MkdirAll(vfsPath, 0755); err != nil { t.Fatal(err) @@ -160,12 +161,12 @@ func TestLoadWithVolume(t *testing.T) { "StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, "NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", "NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", -"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}}, +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, "ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", "HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", "HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", "LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", -"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, "UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}` cfg := fmt.Sprintf(config, vfsPath) @@ -173,8 +174,8 @@ func TestLoadWithVolume(t *testing.T) { t.Fatal(err) } - hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", -"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, + hostConfig := 
`{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, "Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, "SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { @@ -187,7 +188,7 @@ func TestLoadWithVolume(t *testing.T) { } defer volumedrivers.Unregister(volume.DefaultDriverName) - c, err := daemon.load(containerId) + c, err := daemon.load(containerID) if err != nil { t.Fatal(err) } @@ -202,8 +203,8 @@ func TestLoadWithVolume(t *testing.T) { } m := c.MountPoints["/vol1"] - if m.Name != hostVolumeId { - t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name) + if m.Name != hostVolumeID { + t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name) } if m.Destination != "/vol1" { @@ -235,8 +236,8 @@ func TestLoadWithBindMount(t *testing.T) { } defer os.RemoveAll(tmp) - containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" - containerPath := filepath.Join(tmp, containerId) + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) if err = os.MkdirAll(containerPath, 0755); err != nil { t.Fatal(err) } @@ -249,20 +250,20 @@ func TestLoadWithBindMount(t *testing.T) { "StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, "NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", "NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", -"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}}, +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, "ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", "HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", "HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", "LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", -"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, "UpdateDns":false,"Volumes":{"/vol1": "/vol1"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}` if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(config), 0644); err != nil { t.Fatal(err) } - hostConfig := `{"Binds":["/vol1:/vol1"],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", 
-"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, + hostConfig := `{"Binds":["/vol1:/vol1"],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, "Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, "SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { @@ -275,7 +276,7 @@ func TestLoadWithBindMount(t *testing.T) { } defer volumedrivers.Unregister(volume.DefaultDriverName) - c, err := daemon.load(containerId) + c, err := daemon.load(containerID) if err != nil { t.Fatal(err) } @@ -314,14 +315,14 @@ func TestLoadWithVolume17RC(t *testing.T) { } defer os.RemoveAll(tmp) - containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" - containerPath := filepath.Join(tmp, containerId) + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) if err := os.MkdirAll(containerPath, 0755); err != nil { t.Fatal(err) } - hostVolumeId := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101" - volumePath := filepath.Join(tmp, "volumes", hostVolumeId) + hostVolumeID := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101" + volumePath := filepath.Join(tmp, "volumes", hostVolumeID) if err := os.MkdirAll(volumePath, 0755); err != nil { t.Fatal(err) @@ -340,20 +341,20 @@ func TestLoadWithVolume17RC(t *testing.T) { "StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, "NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", "NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", -"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}}, +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, "ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", "HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", "HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", "LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", -"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, 
"UpdateDns":false,"MountPoints":{"/vol1":{"Name":"6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101","Destination":"/vol1","Driver":"local","RW":true,"Source":"","Relabel":""}},"AppliedVolumesFrom":null}` if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(config), 0644); err != nil { t.Fatal(err) } - hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", -"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, "Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, "SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { @@ -366,7 +367,7 @@ func TestLoadWithVolume17RC(t *testing.T) { } defer volumedrivers.Unregister(volume.DefaultDriverName) - c, err := daemon.load(containerId) + c, err := daemon.load(containerID) if err != nil { t.Fatal(err) } @@ -381,8 +382,8 @@ func TestLoadWithVolume17RC(t *testing.T) { } m := c.MountPoints["/vol1"] - if m.Name != hostVolumeId { - t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name) + if m.Name != hostVolumeID { + t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name) } if m.Destination != "/vol1" { @@ -414,15 +415,15 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) { } defer os.RemoveAll(tmp) - containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" - containerPath := filepath.Join(tmp, containerId) + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) if err := os.MkdirAll(containerPath, 0755); err != nil { t.Fatal(err) } - hostVolumeId := stringid.GenerateNonCryptoID() - vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId) - volumePath := filepath.Join(tmp, "volumes", hostVolumeId) + hostVolumeID := stringid.GenerateNonCryptoID() + vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID) + volumePath := filepath.Join(tmp, "volumes", hostVolumeID) if err := os.MkdirAll(vfsPath, 0755); err != nil { t.Fatal(err) @@ -444,12 +445,12 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) { "StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, "NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", "NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", -"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}}, +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, 
"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", "HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", "HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", "LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", -"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, "UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}` cfg := fmt.Sprintf(config, vfsPath) @@ -457,8 +458,8 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) { t.Fatal(err) } - hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", -"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, "Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, "SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil { @@ -471,7 +472,7 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) { } defer volumedrivers.Unregister(volume.DefaultDriverName) - c, err := daemon.load(containerId) + c, err := daemon.load(containerID) if err != nil { t.Fatal(err) } @@ -486,12 +487,12 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) { } m := c.MountPoints["/vol1"] - v, err := createVolume(m.Name, m.Driver) + _, err = daemon.VolumeCreate(m.Name, m.Driver, nil) if err != nil { t.Fatal(err) } - if err := removeVolume(v); err != nil { + if err := daemon.VolumeRm(m.Name); err != nil { t.Fatal(err) } @@ -505,9 +506,10 @@ func initDaemonForVolumesTest(tmp string) (*Daemon, error) { daemon := &Daemon{ repository: tmp, root: tmp, + volumes: store.New(), } - volumesDriver, err := local.New(tmp) + volumesDriver, err := local.New(tmp, 0, 0) if err != nil { return nil, err } @@ -547,3 +549,28 @@ func TestParseSecurityOpt(t *testing.T) { t.Fatal("Expected parseSecurityOpt error, got nil") } } + +func TestNetworkOptions(t *testing.T) { + daemon := &Daemon{} + dconfigCorrect := &Config{ + CommonConfig: CommonConfig{ + DefaultNetwork: "netPlugin:mynet:dev", + ClusterStore: "consul://localhost:8500", + ClusterAdvertise: "192.168.0.1:8000", + }, + } + + if _, err := daemon.networkOptions(dconfigCorrect); err != nil { + t.Fatalf("Expect networkOptions sucess, got error: %v", err) + } + + dconfigWrong := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "consul://localhost:8500://test://bbb", + }, + } + + if _, err := daemon.networkOptions(dconfigWrong); err == nil { + t.Fatalf("Expected networkOptions error, got nil") + } +} diff --git 
a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go index f33ffea4..cca5691a 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_unix.go +++ b/vendor/github.com/docker/docker/daemon/daemon_unix.go @@ -1,46 +1,45 @@ -// +build !windows +// +build linux freebsd package daemon import ( "fmt" "net" - "net/http" "os" "path/filepath" + "strconv" "strings" "syscall" "github.com/Sirupsen/logrus" - "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/dockerversion" + derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" - volumedrivers "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume" "github.com/docker/libnetwork" - nwapi "github.com/docker/libnetwork/api" nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/ipamutils" "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" "github.com/opencontainers/runc/libcontainer/label" + "github.com/vishvananda/netlink" ) -func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { - initID := fmt.Sprintf("%s-init", container.ID) - return daemon.driver.Changes(container.ID, initID) -} - -func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { - initID := fmt.Sprintf("%s-init", container.ID) - return daemon.driver.Diff(container.ID, initID) -} +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true +) func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { var ( @@ -67,34 +66,15 @@ func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error return err } -func (daemon *Daemon) createRootfs(container *Container) error { - // Step 1: create the container directory. - // This doubles as a barrier to avoid race conditions. - if err := os.Mkdir(container.root, 0700); err != nil { - return err +func checkKernelVersion(k, major, minor int) bool { + if v, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("%s", err) + } else { + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } } - initID := fmt.Sprintf("%s-init", container.ID) - if err := daemon.driver.Create(initID, container.ImageID); err != nil { - return err - } - initPath, err := daemon.driver.Get(initID, "") - if err != nil { - return err - } - - if err := setupInitLayer(initPath); err != nil { - daemon.driver.Put(initID) - return err - } - - // We want to unmount init layer before we take snapshot of it - // for the actual container. 
- daemon.driver.Put(initID) - - if err := daemon.driver.Create(container.ID, initID); err != nil { - return err - } - return nil + return true } func checkKernel() error { @@ -105,68 +85,60 @@ func checkKernel() error { // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.10 crashes are clearer. // For details see https://github.com/docker/docker/issues/407 - if k, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("%s", err) - } else { - if kernel.CompareKernelVersion(*k, kernel.VersionInfo{Kernel: 3, Major: 10, Minor: 0}) < 0 { - if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { - logrus.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.10.0.", k.String()) - } + if !checkKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String()) } } return nil } -func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig) { +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, adjustCPUShares bool) { if hostConfig == nil { return } + + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { // By default, MemorySwap is set to twice the size of Memory. hostConfig.MemorySwap = hostConfig.Memory * 2 } } -func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { - var warnings []string +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) - if config != nil { - // The check for a valid workdir path is made on the server rather than in the - // client. This is because we don't know the type of path (Linux or Windows) - // to validate on the client. - if config.WorkingDir != "" && !filepath.IsAbs(config.WorkingDir) { - return warnings, fmt.Errorf("The working directory '%s' is invalid. 
It needs to be an absolute path.", config.WorkingDir) - } + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err } - if hostConfig == nil { - return warnings, nil - } - - for port := range hostConfig.PortBindings { - _, portStr := nat.SplitProtoPort(string(port)) - if _, err := nat.ParsePort(portStr); err != nil { - return warnings, fmt.Errorf("Invalid port specification: %q", portStr) - } - for _, pb := range hostConfig.PortBindings[port] { - _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) - if err != nil { - return warnings, fmt.Errorf("Invalid port specification: %q", pb.HostPort) - } - } - } - if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") { - return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name()) - } + // memory subsystem checks and adjustments if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 { return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") } - if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit { + if hostConfig.Memory > 0 && !sysInfo.MemoryLimit { warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") hostConfig.Memory = 0 + hostConfig.MemorySwap = -1 } - if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit { + if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit { warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") hostConfig.MemorySwap = -1 @@ -177,7 +149,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.") } - if hostConfig.MemorySwappiness != nil && !daemon.SystemConfig().MemorySwappiness { + if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") hostConfig.MemorySwappiness = nil @@ -188,24 +160,72 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness) } } - if hostConfig.CPUPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod { + if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. 
Limitation discarded.") + hostConfig.MemoryReservation = 0 + } + if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + hostConfig.KernelMemory = 0 + } + if hostConfig.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if hostConfig.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + hostConfig.CPUShares = 0 + } + if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") hostConfig.CPUPeriod = 0 } - if hostConfig.CPUQuota > 0 && !daemon.SystemConfig().CpuCfsQuota { + if hostConfig.CPUQuota > 0 && !sysInfo.CPUCfsQuota { warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") hostConfig.CPUQuota = 0 } + if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + hostConfig.CpusetCpus = "" + hostConfig.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus) + if err != nil { + return warnings, derr.ErrorCodeInvalidCpusetCpus.WithArgs(hostConfig.CpusetCpus) + } + if !cpusAvailable { + return warnings, derr.ErrorCodeNotAvailableCpusetCpus.WithArgs(hostConfig.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems) + if err != nil { + return warnings, derr.ErrorCodeInvalidCpusetMems.WithArgs(hostConfig.CpusetMems) + } + if !memsAvailable { + return warnings, derr.ErrorCodeNotAvailableCpusetMems.WithArgs(hostConfig.CpusetMems, sysInfo.Mems) + } + if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. 
Weight discarded.") + hostConfig.BlkioWeight = 0 + } if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) { return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.") } - if hostConfig.OomKillDisable && !daemon.SystemConfig().OomKillDisable { + if hostConfig.OomKillDisable && !sysInfo.OomKillDisable { hostConfig.OomKillDisable = false return warnings, fmt.Errorf("Your kernel does not support oom kill disable.") } - if daemon.SystemConfig().IPv4ForwardingDisabled { + + if sysInfo.IPv4ForwardingDisabled { warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") } @@ -219,7 +239,7 @@ func checkConfigOptions(config *Config) error { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.") } if !config.Bridge.EnableIPTables && !config.Bridge.InterContainerCommunication { - return fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.") + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true.") } if !config.Bridge.EnableIPTables && config.Bridge.EnableIPMasq { config.Bridge.EnableIPMasq = false @@ -232,19 +252,16 @@ func checkSystem() error { if os.Geteuid() != 0 { return fmt.Errorf("The Docker daemon needs to be run as root") } - if err := checkKernel(); err != nil { - return err - } - return nil + return checkKernel() } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *Config, driverName string) error { if config.EnableSelinuxSupport { if selinuxEnabled() { - // As Docker on btrfs and SELinux are incompatible at present, error on both being enabled - if driverName == "btrfs" { - return fmt.Errorf("SELinux is not supported with the BTRFS graph driver") + // As Docker on either btrfs or overlayFS and SELinux are incompatible at present, error on both being enabled + if driverName == "btrfs" || driverName == "overlay" { + return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName) } logrus.Debug("SELinux enabled successfully") } else { @@ -261,25 +278,16 @@ func migrateIfDownlevel(driver graphdriver.Driver, root string) error { return migrateIfAufs(driver, root) } -func configureVolumes(config *Config) error { - volumesDriver, err := local.New(config.Root) - if err != nil { - return err - } - volumedrivers.Register(volumesDriver, volumesDriver.Name()) - return nil -} - -func configureSysInit(config *Config) (string, error) { - localCopy := filepath.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) +func configureSysInit(config *Config, rootUID, rootGID int) (string, error) { + localCopy := filepath.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.Version)) sysInitPath := utils.DockerInitPath(localCopy) if sysInitPath == "" { - return "", fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See https://docs.docker.com/contributing/devenvironment for official build instructions.") + return "", fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. 
See https://docs.docker.com/project/set-up-dev-env/ for official build instructions.") } if sysInitPath != localCopy { // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). - if err := os.Mkdir(filepath.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAs(filepath.Dir(localCopy), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err } if _, err := fileutils.CopyFile(sysInitPath, localCopy); err != nil { @@ -297,11 +305,14 @@ func isBridgeNetworkDisabled(config *Config) bool { return config.Bridge.Iface == disableNetworkBridge } -func networkOptions(dconfig *Config) ([]nwconfig.Option, error) { +func (daemon *Daemon) networkOptions(dconfig *Config) ([]nwconfig.Option, error) { options := []nwconfig.Option{} if dconfig == nil { return options, nil } + + options = append(options, nwconfig.OptionDataDir(dconfig.Root)) + if strings.TrimSpace(dconfig.DefaultNetwork) != "" { dn := strings.Split(dconfig.DefaultNetwork, ":") if len(dn) < 2 { @@ -316,21 +327,33 @@ func networkOptions(dconfig *Config) ([]nwconfig.Option, error) { options = append(options, nwconfig.OptionDefaultNetwork(dn)) } - if strings.TrimSpace(dconfig.NetworkKVStore) != "" { - kv := strings.Split(dconfig.NetworkKVStore, ":") - if len(kv) < 2 { - return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER:KV-URL") + if strings.TrimSpace(dconfig.ClusterStore) != "" { + kv := strings.Split(dconfig.ClusterStore, "://") + if len(kv) != 2 { + return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") } options = append(options, nwconfig.OptionKVProvider(kv[0])) - options = append(options, nwconfig.OptionKVProviderURL(strings.Join(kv[1:], ":"))) + options = append(options, nwconfig.OptionKVProviderURL(kv[1])) + } + if len(dconfig.ClusterOpts) > 0 { + options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) + } + + if daemon.discoveryWatcher != nil { + options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) + } + + if dconfig.ClusterAdvertise != "" { + options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) } options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + options = append(options, driverOptions(dconfig)...) 
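+	// Example values accepted here (see TestNetworkOptions in daemon_test.go
+	// above): DefaultNetwork "netPlugin:mynet:dev", ClusterStore
+	// "consul://localhost:8500", ClusterAdvertise "192.168.0.1:8000".
+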
return options, nil } -func initNetworkController(config *Config) (libnetwork.NetworkController, error) { - netOptions, err := networkOptions(config) +func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config) if err != nil { return nil, err } @@ -340,24 +363,13 @@ func initNetworkController(config *Config) (libnetwork.NetworkController, error) return nil, fmt.Errorf("error obtaining controller instance: %v", err) } - // Initialize default driver "null" - - if err := controller.ConfigureNetworkDriver("null", options.Generic{}); err != nil { - return nil, fmt.Errorf("Error initializing null driver: %v", err) - } - // Initialize default network on "null" - if _, err := controller.NewNetwork("null", "none"); err != nil { + if _, err := controller.NewNetwork("null", "none", libnetwork.NetworkOptionPersist(false)); err != nil { return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) } - // Initialize default driver "host" - if err := controller.ConfigureNetworkDriver("host", options.Generic{}); err != nil { - return nil, fmt.Errorf("Error initializing host driver: %v", err) - } - // Initialize default network on "host" - if _, err := controller.NewNetwork("host", "host"); err != nil { + if _, err := controller.NewNetwork("host", "host", libnetwork.NetworkOptionPersist(false)); err != nil { return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) } @@ -371,31 +383,63 @@ func initNetworkController(config *Config) (libnetwork.NetworkController, error) return controller, nil } -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { - option := options.Generic{ - "EnableIPForwarding": config.Bridge.EnableIPForward} +func driverOptions(config *Config) []nwconfig.Option { + bridgeConfig := options.Generic{ + "EnableIPForwarding": config.Bridge.EnableIPForward, + "EnableIPTables": config.Bridge.EnableIPTables, + "EnableUserlandProxy": config.Bridge.EnableUserlandProxy} + bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} - if err := controller.ConfigureNetworkDriver("bridge", options.Generic{netlabel.GenericData: option}); err != nil { - return fmt.Errorf("Error initializing bridge driver: %v", err) + dOptions := []nwconfig.Option{} + dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption)) + return dOptions +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return fmt.Errorf("could not delete the default bridge network: %v", err) + } } - netOption := options.Generic{ - "BridgeName": config.Bridge.Iface, - "Mtu": config.Mtu, - "EnableIPTables": config.Bridge.EnableIPTables, - "EnableIPMasquerade": config.Bridge.EnableIPMasq, - "EnableICC": config.Bridge.InterContainerCommunication, - "EnableUserlandProxy": config.Bridge.EnableUserlandProxy, + bridgeName := bridge.DefaultBridgeName + if config.Bridge.Iface != "" { + bridgeName = config.Bridge.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableIPMasquerade: strconv.FormatBool(config.Bridge.EnableIPMasq), + bridge.EnableICC: strconv.FormatBool(config.Bridge.InterContainerCommunication), + } + + // --ip processing + if config.Bridge.DefaultIP != nil { + 
netOption[bridge.DefaultBindingIP] = config.Bridge.DefaultIP.String() + } + + ipamV4Conf := libnetwork.IpamConf{} + + ipamV4Conf.AuxAddresses = make(map[string]string) + + if nw, _, err := ipamutils.ElectInterfaceAddresses(bridgeName); err == nil { + ipamV4Conf.PreferredPool = nw.String() + hip, _ := types.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } } if config.Bridge.IP != "" { - ip, bipNet, err := net.ParseCIDR(config.Bridge.IP) + ipamV4Conf.PreferredPool = config.Bridge.IP + ip, _, err := net.ParseCIDR(config.Bridge.IP) if err != nil { return err } - - bipNet.IP = ip - netOption["AddressIPv4"] = bipNet + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } if config.Bridge.FixedCIDR != "" { @@ -404,37 +448,44 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e return err } - netOption["FixedCIDR"] = fCIDR + ipamV4Conf.SubPool = fCIDR.String() } + if config.Bridge.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.Bridge.DefaultGatewayIPv4.String() + } + + var ipamV6Conf *libnetwork.IpamConf if config.Bridge.FixedCIDRv6 != "" { _, fCIDRv6, err := net.ParseCIDR(config.Bridge.FixedCIDRv6) if err != nil { return err } - - netOption["FixedCIDRv6"] = fCIDRv6 - } - - if config.Bridge.DefaultGatewayIPv4 != nil { - netOption["DefaultGatewayIPv4"] = config.Bridge.DefaultGatewayIPv4 + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.PreferredPool = fCIDRv6.String() } if config.Bridge.DefaultGatewayIPv6 != nil { - netOption["DefaultGatewayIPv6"] = config.Bridge.DefaultGatewayIPv6 + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.Bridge.DefaultGatewayIPv6.String() } - // --ip processing - if config.Bridge.DefaultIP != nil { - netOption["DefaultBindingIP"] = config.Bridge.DefaultIP + v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + if ipamV6Conf != nil { + v6Conf = append(v6Conf, ipamV6Conf) } - // Initialize default network on "bridge" with the same name _, err := controller.NewNetwork("bridge", "bridge", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, netlabel.EnableIPv6: config.Bridge.EnableIPv6, - })) + }), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf)) if err != nil { return fmt.Errorf("Error creating default \"bridge\" network: %v", err) }
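For intuition on the --bip handling above, a small sketch of how a single CIDR value carries both an address pool and a gateway address; the 172.18.0.1/16 value is an assumed example, not a daemon default:

package main

import (
	"fmt"
	"net"
)

func main() {
	// With --bip=172.18.0.1/16 the daemon keeps the raw CIDR as the
	// preferred pool and uses the host address part as the gateway.
	ip, ipNet, err := net.ParseCIDR("172.18.0.1/16")
	if err != nil {
		panic(err)
	}
	fmt.Printf("pool (network): %s, gateway: %s\n", ipNet.String(), ip.String())
}

@@ -447,7 +498,7 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer.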
-func setupInitLayer(initLayer string) error { +func setupInitLayer(initLayer string, rootUID, rootGID int) error { for pth, typ := range map[string]string{ "/dev/pts": "dir", "/dev/shm": "dir", @@ -470,12 +521,12 @@ func setupInitLayer(initLayer string) error { if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { if os.IsNotExist(err) { - if err := system.MkdirAll(filepath.Join(initLayer, filepath.Dir(pth)), 0755); err != nil { + if err := idtools.MkdirAllAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { return err } switch typ { case "dir": - if err := system.MkdirAll(filepath.Join(initLayer, pth), 0755); err != nil { + if err := idtools.MkdirAllAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { return err } case "file": @@ -484,6 +535,7 @@ func setupInitLayer(initLayer string) error { return err } f.Close() + f.Chown(rootUID, rootGID) default: if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { return err @@ -499,11 +551,8 @@ func setupInitLayer(initLayer string) error { return nil } -func (daemon *Daemon) NetworkApiRouter() func(w http.ResponseWriter, req *http.Request) { - return nwapi.NewHTTPHandler(daemon.netController) -} - -func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { +// registerLinks writes the links to a file. +func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error { if hostConfig == nil || hostConfig.Links == nil { return nil } @@ -528,7 +577,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. if child.hostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } - if err := daemon.RegisterLink(container, child, alias); err != nil { + if err := daemon.registerLink(container, child, alias); err != nil { return err } } @@ -536,23 +585,56 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. // After we load all the links into the daemon // set them to nil on the hostconfig hostConfig.Links = nil - if err := container.WriteHostConfig(); err != nil { + if err := container.writeHostConfig(); err != nil { return err } return nil } -func (daemon *Daemon) newBaseContainer(id string) Container { - return Container{ +func (daemon *Daemon) newBaseContainer(id string) *Container { + return &Container{ CommonContainer: CommonContainer{ ID: id, State: NewState(), execCommands: newExecStore(), root: daemon.containerRoot(id), + MountPoints: make(map[string]*volume.MountPoint), }, - MountPoints: make(map[string]*mountPoint), - Volumes: make(map[string]string), - VolumesRW: make(map[string]bool), + Volumes: make(map[string]string), + VolumesRW: make(map[string]bool), } } + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) { + if err := daemon.Unmount(container); err != nil { + logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) + } +} + +// getDefaultRouteMtu returns the MTU for the default route's interface. 
+func getDefaultRouteMtu() (int, error) { + routes, err := netlink.RouteList(nil, 0) + if err != nil { + return 0, err + } + for _, r := range routes { + // a nil Dst means that this is the default route. + if r.Dst == nil { + i, err := net.InterfaceByIndex(r.LinkIndex) + if err != nil { + continue + } + return i.MTU, nil + } + } + return 0, errNoDefaultRoute +} diff --git a/vendor/github.com/docker/docker/api/server/server_linux_test.go b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go similarity index 59% rename from vendor/github.com/docker/docker/api/server/server_linux_test.go rename to vendor/github.com/docker/docker/daemon/daemon_unix_test.go index 83244b1c..fc13e667 100644 --- a/vendor/github.com/docker/docker/api/server/server_linux_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go @@ -1,67 +1,86 @@ -// +build linux +// +build !windows -package server +package daemon import ( + "io/ioutil" + "os" "testing" - "github.com/docker/docker/pkg/version" "github.com/docker/docker/runconfig" ) -func TestAdjustCPUSharesOldApi(t *testing.T) { - apiVersion := version.Version("1.18") +func TestAdjustCPUShares(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + hostConfig := &runconfig.HostConfig{ CPUShares: linuxMinCPUShares - 1, } - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != linuxMinCPUShares { t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) } hostConfig.CPUShares = linuxMaxCPUShares + 1 - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != linuxMaxCPUShares { t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) } hostConfig.CPUShares = 0 - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != 0 { t.Error("Expected CPUShares to be unchanged") } hostConfig.CPUShares = 1024 - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, true) if hostConfig.CPUShares != 1024 { t.Error("Expected CPUShares to be unchanged") } } func TestAdjustCPUSharesNoAdjustment(t *testing.T) { - apiVersion := version.Version("1.19") + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + hostConfig := &runconfig.HostConfig{ CPUShares: linuxMinCPUShares - 1, } - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != linuxMinCPUShares-1 { t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) } hostConfig.CPUShares = linuxMaxCPUShares + 1 - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != linuxMaxCPUShares+1 { t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) } hostConfig.CPUShares = 0 - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != 0 { t.Error("Expected CPUShares to be unchanged") } hostConfig.CPUShares = 1024 - adjustCPUShares(apiVersion, hostConfig) + daemon.adaptContainerSettings(hostConfig, false) if hostConfig.CPUShares != 1024 { t.Error("Expected CPUShares to be unchanged") } diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go 
b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go new file mode 100644 index 00000000..987528f4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/daemon_windows.go b/vendor/github.com/docker/docker/daemon/daemon_windows.go index e89526d8..4050e288 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_windows.go +++ b/vendor/github.com/docker/docker/daemon/daemon_windows.go @@ -3,69 +3,29 @@ package daemon import ( "fmt" "os" - "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/windows" - "github.com/docker/docker/pkg/archive" + // register the windows graph driver + _ "github.com/docker/docker/daemon/graphdriver/windows" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/docker/libnetwork" - "github.com/microsoft/hcsshim" ) -const DefaultVirtualSwitch = "Virtual Switch" - -func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { - return daemon.driver.Changes(container.ID, container.ImageID) -} - -func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { - return daemon.driver.Diff(container.ID, container.ImageID) -} +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + windowsMinCPUShares = 1 + windowsMaxCPUShares = 9 +) func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { return nil } -func (daemon *Daemon) createRootfs(container *Container) error { - // Step 1: create the container directory. - // This doubles as a barrier to avoid race conditions. - if err := os.Mkdir(container.root, 0700); err != nil { - return err - } - - if wd, ok := daemon.driver.(*windows.WindowsGraphDriver); ok { - if container.ImageID != "" { - // Get list of paths to parent layers. - logrus.Debugln("createRootfs: Container has parent image:", container.ImageID) - img, err := daemon.graph.Get(container.ImageID) - if err != nil { - return err - } - - ids, err := daemon.graph.ParentLayerIds(img) - if err != nil { - return err - } - logrus.Debugf("Got image ids: %d", len(ids)) - - if err := hcsshim.CreateSandboxLayer(wd.Info(), container.ID, container.ImageID, wd.LayerIdsToPaths(ids)); err != nil { - return err - } - } else { - if err := daemon.driver.Create(container.ID, container.ImageID); err != nil { - return err - } - } - } else { - // Fall-back code path to allow the use of the VFS driver for development - if err := daemon.driver.Create(container.ID, container.ImageID); err != nil { - return err - } - - } +func setupInitLayer(initLayer string, rootUID, rootGID int) error { return nil } @@ -73,12 +33,25 @@ func checkKernel() error { return nil } -func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig) { - // TODO Windows. +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. 
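On Windows (body below) this reduces to clamping CPUShares into the platform's allowed window. A standalone sketch of that clamp; the 1 and 9 bounds are the constants introduced in this hunk, and a zero value is left alone because it means "unset".

package main

import "fmt"

const (
	windowsMinCPUShares = 1
	windowsMaxCPUShares = 9
)

// clampCPUShares pins out-of-range requests to the platform window.
func clampCPUShares(requested int64) int64 {
	if requested < 0 {
		return windowsMinCPUShares
	}
	if requested > windowsMaxCPUShares {
		return windowsMaxCPUShares
	}
	return requested
}

func main() {
	fmt.Println(clampCPUShares(-5), clampCPUShares(100), clampCPUShares(4)) // 1 9 4
}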
+func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, adjustCPUShares bool) { + if hostConfig == nil { + return + } + + if hostConfig.CPUShares < 0 { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, windowsMinCPUShares) + hostConfig.CPUShares = windowsMinCPUShares + } else if hostConfig.CPUShares > windowsMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, windowsMaxCPUShares) + hostConfig.CPUShares = windowsMaxCPUShares + } } -func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { - // TODO Windows. Verifications TBC +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { return nil, nil } @@ -89,21 +62,15 @@ func checkConfigOptions(config *Config) error { // checkSystem validates platform-specific requirements func checkSystem() error { - var dwVersion uint32 - - // TODO Windows. May need at some point to ensure have elevation and - // possibly LocalSystem. - // Validate the OS version. Note that docker.exe must be manifested for this // call to return the correct version. - dwVersion, err := syscall.GetVersion() + osv, err := system.GetOSVersion() if err != nil { - return fmt.Errorf("Failed to call GetVersion()") + return err } - if int(dwVersion&0xFF) < 10 { + if osv.MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } - return nil } @@ -116,12 +83,7 @@ func migrateIfDownlevel(driver graphdriver.Driver, root string) error { return nil } -func configureVolumes(config *Config) error { - // Windows does not support volumes at this time - return nil -} - -func configureSysInit(config *Config) (string, error) { +func configureSysInit(config *Config, rootUID, rootGID int) (string, error) { // TODO Windows. return os.Getenv("TEMP"), nil } @@ -130,15 +92,17 @@ func isBridgeNetworkDisabled(config *Config) bool { return false } -func initNetworkController(config *Config) (libnetwork.NetworkController, error) { +func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) { // Set the name of the virtual switch if not specified by -b on daemon start if config.Bridge.VirtualSwitchName == "" { - config.Bridge.VirtualSwitchName = DefaultVirtualSwitch + config.Bridge.VirtualSwitchName = defaultVirtualSwitch } return nil, nil } -func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error { // TODO Windows. Factored out for network modes. There may be more // refactoring required here. @@ -156,7 +120,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. //An error from daemon.Get() means this name could not be found return fmt.Errorf("Could not get container for %s", name) } - if err := daemon.RegisterLink(container, child, alias); err != nil { + if err := daemon.registerLink(container, child, alias); err != nil { return err } } @@ -164,14 +128,14 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. 
// After we load all the links into the daemon // set them to nil on the hostconfig hostConfig.Links = nil - if err := container.WriteHostConfig(); err != nil { + if err := container.writeHostConfig(); err != nil { return err } return nil } -func (daemon *Daemon) newBaseContainer(id string) Container { - return Container{ +func (daemon *Daemon) newBaseContainer(id string) *Container { + return &Container{ CommonContainer: CommonContainer{ ID: id, State: NewState(), @@ -180,3 +144,30 @@ func (daemon *Daemon) newBaseContainer(id string) Container { }, } } + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *Container) error { + // We do not mount if a Hyper-V container + if !container.hostConfig.Isolation.IsHyperV() { + if err := daemon.Mount(container); err != nil { + return err + } + } + return nil +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) { + // We do not unmount if a Hyper-V container + if !container.hostConfig.Isolation.IsHyperV() { + if err := daemon.Unmount(container); err != nil { + logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_zfs.go b/vendor/github.com/docker/docker/daemon/daemon_zfs.go index d27eff72..e5b4dce9 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_zfs.go +++ b/vendor/github.com/docker/docker/daemon/daemon_zfs.go @@ -3,5 +3,6 @@ package daemon import ( + // register the zfs driver _ "github.com/docker/docker/daemon/graphdriver/zfs" ) diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go index 30bfeddd..f62f71e9 100644 --- a/vendor/github.com/docker/docker/daemon/delete.go +++ b/vendor/github.com/docker/docker/daemon/delete.go @@ -4,15 +4,21 @@ import ( "fmt" "os" "path" - "runtime" "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/volume/store" ) +// ContainerRmConfig is a holder for passing in runtime config. type ContainerRmConfig struct { ForceRemove, RemoveVolume, RemoveLink bool } +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. 
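ContainerRm funnels into rm(), which now guards against concurrent removals with a RemovalInProgress state flag instead of consulting the container list. A standalone sketch of that guard; the struct here is a stand-in for the real container State, but the set/reset semantics mirror the diff.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errAlreadyRemoving = errors.New("removal already in progress")

// state sketches the removal guard: the first caller wins, later
// callers observe errAlreadyRemoving and can return success instead
// of racing the in-flight removal.
type state struct {
	sync.Mutex
	removalInProgress bool
}

func (s *state) setRemovalInProgress() error {
	s.Lock()
	defer s.Unlock()
	if s.removalInProgress {
		return errAlreadyRemoving
	}
	s.removalInProgress = true
	return nil
}

func (s *state) resetRemovalInProgress() {
	s.Lock()
	s.removalInProgress = false
	s.Unlock()
}

func main() {
	var s state
	_ = s.setRemovalInProgress()           // first remover proceeds
	err := s.setRemovalInProgress()        // concurrent remover backs off
	fmt.Println(err == errAlreadyRemoving) // true
	s.resetRemovalInProgress()
}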
func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error { container, err := daemon.Get(name) if err != nil { @@ -26,20 +32,20 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error } parent, n := path.Split(name) if parent == "/" { - return fmt.Errorf("Conflict, cannot remove the default name of the container") + return derr.ErrorCodeDefaultName } - pe := daemon.ContainerGraph().Get(parent) + pe := daemon.containerGraph().Get(parent) if pe == nil { - return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + return derr.ErrorCodeNoParent.WithArgs(parent, name) } - if err := daemon.ContainerGraph().Delete(name); err != nil { + if err := daemon.containerGraph().Delete(name); err != nil { return err } parentContainer, _ := daemon.Get(pe.ID()) if parentContainer != nil { - if err := parentContainer.UpdateNetwork(); err != nil { + if err := daemon.updateNetwork(parentContainer); err != nil { logrus.Debugf("Could not update network to remove link %s: %v", n, err) } } @@ -48,12 +54,14 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error } if err := daemon.rm(container, config.ForceRemove); err != nil { - return fmt.Errorf("Cannot destroy container %s: %v", name, err) + // return derr.ErrorCodeCantDestroy.WithArgs(name, utils.GetErrorMessage(err)) + return err } - if config.RemoveVolume { - container.removeMountPoints() + if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil { + logrus.Error(err) } + return nil } @@ -61,40 +69,38 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) { if container.IsRunning() { if !forceRemove { - return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f") + return derr.ErrorCodeRmRunning } - if err := container.Kill(); err != nil { - return fmt.Errorf("Could not kill running container, cannot remove - %v", err) + if err := daemon.Kill(container); err != nil { + return derr.ErrorCodeRmFailed.WithArgs(err) } } + // Container state RemovalInProgress should be used to avoid races. + if err = container.setRemovalInProgress(); err != nil { + if err == derr.ErrorCodeAlreadyRemoving { + // do not fail when the removal is in progress started by other request. + return nil + } + return derr.ErrorCodeRmState.WithArgs(err) + } + defer container.resetRemovalInProgress() + // stop collection of stats for the container regardless // if stats are currently getting collected. daemon.statsCollector.stopCollection(container) - element := daemon.containers.Get(container.ID) - if element == nil { - return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) - } - - // Container state RemovalInProgress should be used to avoid races. - if err = container.SetRemovalInProgress(); err != nil { - return fmt.Errorf("Failed to set container state to RemovalInProgress: %s", err) - } - - defer container.ResetRemovalInProgress() - - if err = container.Stop(3); err != nil { + if err = daemon.containerStop(container, 3); err != nil { return err } // Mark container dead. We don't want anybody to be restarting it. - container.SetDead() + container.setDead() // Save container state to disk. So that if error happens before // container meta file got removed from disk, then a restart of // docker should not make a dead container alive. 
- if err := container.ToDisk(); err != nil { + if err := container.toDiskLocking(); err != nil { logrus.Errorf("Error saving dying container to disk: %v", err) } @@ -105,42 +111,52 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) { daemon.idIndex.Delete(container.ID) daemon.containers.Delete(container.ID) os.RemoveAll(container.root) - container.LogEvent("destroy") + daemon.LogContainerEvent(container, "destroy") } }() - if _, err := daemon.containerGraph.Purge(container.ID); err != nil { + if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil { logrus.Debugf("Unable to remove container from link graph: %s", err) } if err = daemon.driver.Remove(container.ID); err != nil { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) + return derr.ErrorCodeRmDriverFS.WithArgs(daemon.driver, container.ID, err) } - // There will not be an -init on Windows, so don't fail by not attempting to delete it - if runtime.GOOS != "windows" { - initID := fmt.Sprintf("%s-init", container.ID) - if err := daemon.driver.Remove(initID); err != nil { - return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err) - } + initID := fmt.Sprintf("%s-init", container.ID) + if err := daemon.driver.Remove(initID); err != nil { + return derr.ErrorCodeRmInit.WithArgs(daemon.driver, initID, err) } if err = os.RemoveAll(container.root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + return derr.ErrorCodeRmFS.WithArgs(container.ID, err) } if err = daemon.execDriver.Clean(container.ID); err != nil { - return fmt.Errorf("Unable to remove execdriver data for %s: %s", container.ID, err) + return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err) } selinuxFreeLxcContexts(container.ProcessLabel) daemon.idIndex.Delete(container.ID) daemon.containers.Delete(container.ID) - container.LogEvent("destroy") + daemon.LogContainerEvent(container, "destroy") return nil } -func (daemon *Daemon) DeleteVolumes(c *Container) error { - return c.removeMountPoints() +// VolumeRm removes the volume with the given name. 
+// If the volume is referenced by a container it is not removed.
+// This is called directly from the remote API.
+func (daemon *Daemon) VolumeRm(name string) error {
+	v, err := daemon.volumes.Get(name)
+	if err != nil {
+		return err
+	}
+	if err := daemon.volumes.Remove(v); err != nil {
+		if err == store.ErrVolumeInUse {
+			return derr.ErrorCodeRmVolumeInUse.WithArgs(err)
+		}
+		return derr.ErrorCodeRmVolume.WithArgs(name, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/delete_test.go b/vendor/github.com/docker/docker/daemon/delete_test.go
new file mode 100644
index 00000000..9a82fce2
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/delete_test.go
@@ -0,0 +1,39 @@
+package daemon
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/runconfig"
+)
+
+func TestContainerDoubleDelete(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	daemon := &Daemon{
+		repository: tmp,
+		root:       tmp,
+	}
+
+	container := &Container{
+		CommonContainer: CommonContainer{
+			State:  NewState(),
+			Config: &runconfig.Config{},
+		},
+	}
+
+	// Mark the container as having a delete in progress
+	if err := container.setRemovalInProgress(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Try to remove the container when its state is removalInProgress.
+	// It should ignore the container and not return an error.
+	if err := daemon.rm(container, true); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/docker/docker/daemon/discovery.go b/vendor/github.com/docker/docker/daemon/discovery.go
new file mode 100644
index 00000000..006eddec
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/discovery.go
@@ -0,0 +1,48 @@
+package daemon
+
+import (
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/discovery"
+
+	// Register the libkv backends for discovery.
+	_ "github.com/docker/docker/pkg/discovery/kv"
+)
+
+const (
+	// defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval.
+	defaultDiscoveryHeartbeat = 20 * time.Second
+
+	// defaultDiscoveryTTL is the default TTL interval for discovery.
+	defaultDiscoveryTTL = 60 * time.Second
+)
+
+// initDiscovery initializes the node discovery subsystem by connecting to the specified backend
+// and starts a registration loop to advertise the current node under the specified address.
+func initDiscovery(backend, address string, clusterOpts map[string]string) (discovery.Backend, error) {
+	var (
+		discoveryBackend discovery.Backend
+		err              error
+	)
+	if discoveryBackend, err = discovery.New(backend, defaultDiscoveryHeartbeat, defaultDiscoveryTTL, clusterOpts); err != nil {
+		return nil, err
+	}
+
+	// We call Register() on the discovery backend in a loop for the whole lifetime of the daemon,
+	// but we never actually Watch() for nodes appearing and disappearing for the moment.
+	go registrationLoop(discoveryBackend, address)
+	return discoveryBackend, nil
+}
+
+// registrationLoop registers the current node against the discovery backend using the specified
+// address. The function never returns, as registration against the backend comes with a TTL and
+// requires regular heartbeats.
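Before the loop body below, a standalone sketch of the same TTL heartbeat idea against a stand-in Registrar interface (the real code talks to discovery.Backend). The period must stay comfortably below the advertised TTL, matching the 20s heartbeat versus 60s TTL defaults above, or entries expire between beats.

package main

import (
	"log"
	"time"
)

// Registrar stands in for the discovery backend; only Register matters here.
type Registrar interface {
	Register(addr string) error
}

// heartbeat re-registers addr every period until stop is closed.
func heartbeat(r Registrar, addr string, period time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		if err := r.Register(addr); err != nil {
			log.Printf("registering %q failed: %v", addr, err)
		}
		select {
		case <-ticker.C:
		case <-stop:
			return
		}
	}
}

type noopRegistrar struct{}

func (noopRegistrar) Register(string) error { return nil }

func main() {
	stop := make(chan struct{})
	go heartbeat(noopRegistrar{}, "192.168.1.10:2376", 20*time.Second, stop)
	time.Sleep(50 * time.Millisecond)
	close(stop)
}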
+func registrationLoop(discoveryBackend discovery.Backend, address string) { + for { + if err := discoveryBackend.Register(address); err != nil { + log.Errorf("Registering as %q in discovery failed: %v", address, err) + } + time.Sleep(defaultDiscoveryHeartbeat) + } +} diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go new file mode 100644 index 00000000..45a48820 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/errors.go @@ -0,0 +1,23 @@ +package daemon + +import ( + "strings" + + derr "github.com/docker/docker/errors" + "github.com/docker/docker/graph/tags" + "github.com/docker/docker/pkg/parsers" +) + +func (d *Daemon) graphNotExistToErrcode(imageName string, err error) error { + if d.Graph().IsNotExist(err, imageName) { + if strings.Contains(imageName, "@") { + return derr.ErrorCodeNoSuchImageHash.WithArgs(imageName) + } + img, tag := parsers.ParseRepositoryTag(imageName) + if tag == "" { + tag = tags.DefaultTag + } + return derr.ErrorCodeNoSuchImageTag.WithArgs(img, tag) + } + return err +} diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go new file mode 100644 index 00000000..85ac25c0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events.go @@ -0,0 +1,10 @@ +package daemon + +// LogContainerEvent generates an event related to a container. +func (daemon *Daemon) LogContainerEvent(container *Container, action string) { + daemon.EventsService.Log( + action, + container.ID, + container.Config.Image, + ) +} diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go index 8b163aec..299aaf60 100644 --- a/vendor/github.com/docker/docker/daemon/exec.go +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -1,7 +1,6 @@ package daemon import ( - "fmt" "io" "io/ioutil" "strings" @@ -10,20 +9,26 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/pkg/broadcastwriter" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/runconfig" ) -type execConfig struct { +// ExecConfig holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. 
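These configs are tracked in RWMutex-guarded stores (one per container, one daemon-wide), defined right after the struct below. A generic standalone sketch of that store shape, with execRecord standing in for *ExecConfig:

package main

import (
	"fmt"
	"sync"
)

type execRecord struct{ ID string }

// execStore serializes writers with the exclusive lock and lets
// readers proceed concurrently under the shared lock.
type execStore struct {
	mu sync.RWMutex
	s  map[string]*execRecord
}

func newExecStore() *execStore {
	return &execStore{s: make(map[string]*execRecord)}
}

func (e *execStore) Add(id string, rec *execRecord) {
	e.mu.Lock()
	e.s[id] = rec
	e.mu.Unlock()
}

func (e *execStore) Get(id string) *execRecord {
	e.mu.RLock()
	defer e.mu.RUnlock()
	return e.s[id]
}

func main() {
	store := newExecStore()
	store.Add("abc123", &execRecord{ID: "abc123"})
	fmt.Println(store.Get("abc123").ID)
}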
+type ExecConfig struct { sync.Mutex ID string Running bool ExitCode int ProcessConfig *execdriver.ProcessConfig - StreamConfig + streamConfig OpenStdin bool OpenStderr bool OpenStdout bool @@ -35,21 +40,21 @@ type execConfig struct { } type execStore struct { - s map[string]*execConfig + s map[string]*ExecConfig sync.RWMutex } func newExecStore() *execStore { - return &execStore{s: make(map[string]*execConfig, 0)} + return &execStore{s: make(map[string]*ExecConfig, 0)} } -func (e *execStore) Add(id string, execConfig *execConfig) { +func (e *execStore) Add(id string, ExecConfig *ExecConfig) { e.Lock() - e.s[id] = execConfig + e.s[id] = ExecConfig e.Unlock() } -func (e *execStore) Get(id string) *execConfig { +func (e *execStore) Get(id string) *ExecConfig { e.RLock() res := e.s[id] e.RUnlock() @@ -72,24 +77,35 @@ func (e *execStore) List() []string { return IDs } -func (execConfig *execConfig) Resize(h, w int) error { +func (ExecConfig *ExecConfig) resize(h, w int) error { select { - case <-execConfig.waitStart: + case <-ExecConfig.waitStart: case <-time.After(time.Second): - return fmt.Errorf("Exec %s is not running, so it can not be resized.", execConfig.ID) + return derr.ErrorCodeExecResize.WithArgs(ExecConfig.ID) } - return execConfig.ProcessConfig.Terminal.Resize(h, w) + return ExecConfig.ProcessConfig.Terminal.Resize(h, w) } -func (d *Daemon) registerExecCommand(execConfig *execConfig) { +func (d *Daemon) registerExecCommand(ExecConfig *ExecConfig) { // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. - execConfig.Container.execCommands.Add(execConfig.ID, execConfig) + ExecConfig.Container.execCommands.Add(ExecConfig.ID, ExecConfig) // Storing execs in daemon for easy access via remote API. - d.execCommands.Add(execConfig.ID, execConfig) + d.execCommands.Add(ExecConfig.ID, ExecConfig) } -func (d *Daemon) getExecConfig(name string) (*execConfig, error) { - execConfig := d.execCommands.Get(name) +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getExecConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*ExecConfig, error) { + ec := d.execCommands.Get(name) // If the exec is found but its container is not in the daemon's list of // containers then it must have been delete, in which case instead of @@ -97,20 +113,22 @@ func (d *Daemon) getExecConfig(name string) (*execConfig, error) { // the user sees the same error now that they will after the // 5 minute clean-up loop is run which erases old/dead execs. 
- if execConfig != nil && d.containers.Get(execConfig.Container.ID) != nil { - - if !execConfig.Container.IsRunning() { - return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID) + if ec != nil && d.containers.Get(ec.Container.ID) != nil { + if !ec.Container.IsRunning() { + return nil, derr.ErrorCodeContainerNotRunning.WithArgs(ec.Container.ID, ec.Container.State.String()) } - return execConfig, nil + if ec.Container.isPaused() { + return nil, derr.ErrorCodeExecPaused.WithArgs(ec.Container.ID) + } + return ec, nil } - return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name) + return nil, derr.ErrorCodeNoExecID.WithArgs(name) } -func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { - execConfig.Container.execCommands.Delete(execConfig.ID) - d.execCommands.Delete(execConfig.ID) +func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) { + ExecConfig.Container.execCommands.Delete(ExecConfig.ID) + d.execCommands.Delete(ExecConfig.ID) } func (d *Daemon) getActiveContainer(name string) (*Container, error) { @@ -120,27 +138,23 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) { } if !container.IsRunning() { - return nil, fmt.Errorf("Container %s is not running", name) + return nil, derr.ErrorCodeNotRunning.WithArgs(name) } - if container.IsPaused() { - return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name) + if container.isPaused() { + return nil, derr.ErrorCodeExecPaused.WithArgs(name) } return container, nil } +// ContainerExecCreate sets up an exec in a running container. func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) { - // Not all drivers support Exec (LXC for example) - if err := checkExecSupport(d.execDriver.Name()); err != nil { - return "", err - } - container, err := d.getActiveContainer(config.Container) if err != nil { return "", err } - cmd := runconfig.NewCommand(config.Cmd...) - entrypoint, args := d.getEntrypointAndArgs(runconfig.NewEntrypoint(), cmd) + cmd := stringutils.NewStrSlice(config.Cmd...) + entrypoint, args := d.getEntrypointAndArgs(stringutils.NewStrSlice(), cmd) user := config.User if len(user) == 0 { @@ -152,58 +166,54 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro Entrypoint: entrypoint, Arguments: args, User: user, + Privileged: config.Privileged, } - execConfig := &execConfig{ + ExecConfig := &ExecConfig{ ID: stringid.GenerateNonCryptoID(), OpenStdin: config.AttachStdin, OpenStdout: config.AttachStdout, OpenStderr: config.AttachStderr, - StreamConfig: StreamConfig{}, + streamConfig: streamConfig{}, ProcessConfig: processConfig, Container: container, Running: false, waitStart: make(chan struct{}), } - d.registerExecCommand(execConfig) + d.registerExecCommand(ExecConfig) - container.LogEvent("exec_create: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) - - return execConfig.ID, nil + d.LogContainerEvent(container, "exec_create: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " ")) + return ExecConfig.ID, nil } -func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error { - +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. 
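Inside ContainerExecStart (next hunk), the caller's stdin is bridged into the exec'd process through an io.Pipe so the process sees EOF once the source drains. A standalone sketch of that plumbing; the vendored code uses pools.Copy rather than io.Copy:

package main

import (
	"fmt"
	"io"
	"strings"
)

// pipeStdin copies the caller's stream into one end of a pipe and
// hands the read end to the consumer; closing the writer when the
// copy finishes propagates EOF.
func pipeStdin(stdin io.Reader) io.ReadCloser {
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		io.Copy(w, stdin)
	}()
	return r
}

func main() {
	r := pipeStdin(strings.NewReader("echo hi\n"))
	b, _ := io.ReadAll(r)
	fmt.Print(string(b))
}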
+func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer ) - execConfig, err := d.getExecConfig(execName) + ec, err := d.getExecConfig(name) if err != nil { - return err + return derr.ErrorCodeNoExecID.WithArgs(name) } - func() { - execConfig.Lock() - defer execConfig.Unlock() - if execConfig.Running { - err = fmt.Errorf("Error: Exec command %s is already running", execName) - } - execConfig.Running = true - }() - if err != nil { - return err + ec.Lock() + if ec.Running { + ec.Unlock() + return derr.ErrorCodeExecRunning.WithArgs(ec.ID) } + ec.Running = true + ec.Unlock() - logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID) - container := execConfig.Container + logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID) + container := ec.Container + d.LogContainerEvent(container, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " ")) - container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) - - if execConfig.OpenStdin { + if ec.OpenStdin { r, w := io.Pipe() go func() { defer w.Close() @@ -212,58 +222,70 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout }() cStdin = r } - if execConfig.OpenStdout { + if ec.OpenStdout { cStdout = stdout } - if execConfig.OpenStderr { + if ec.OpenStderr { cStderr = stderr } - execConfig.StreamConfig.stderr = broadcastwriter.New() - execConfig.StreamConfig.stdout = broadcastwriter.New() + ec.streamConfig.stderr = new(broadcaster.Unbuffered) + ec.streamConfig.stdout = new(broadcaster.Unbuffered) // Attach to stdin - if execConfig.OpenStdin { - execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe() + if ec.OpenStdin { + ec.streamConfig.stdin, ec.streamConfig.stdinPipe = io.Pipe() } else { - execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin + ec.streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } - attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr) + attachErr := attach(&ec.streamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr) execErr := make(chan error) - // Note, the execConfig data will be removed when the container + // Note, the ExecConfig data will be removed when the container // itself is deleted. This allows us to query it (for things like // the exitStatus) even after the cmd is done running. 
go func() { - if err := container.Exec(execConfig); err != nil { - execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err) - } + execErr <- d.containerExec(container, ec) }() + select { case err := <-attachErr: if err != nil { - return fmt.Errorf("attach failed with error: %s", err) + return derr.ErrorCodeExecAttach.WithArgs(err) } - break + return nil case err := <-execErr: - return err - } + if aErr := <-attachErr; aErr != nil && err == nil { + return derr.ErrorCodeExecAttach.WithArgs(aErr) + } + if err == nil { + return nil + } - return nil + // Maybe the container stopped while we were trying to exec + if !container.IsRunning() { + return derr.ErrorCodeExecContainerStopped + } + return derr.ErrorCodeExecCantRun.WithArgs(ec.ID, container.ID, err) + } } -func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, startCallback) +// Exec calls the underlying exec driver to run +func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) { + hooks := execdriver.Hooks{ + Start: startCallback, + } + exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, hooks) // On err, make sure we don't leave ExitCode at zero if err != nil && exitStatus == 0 { exitStatus = 128 } - execConfig.ExitCode = exitStatus - execConfig.Running = false + ExecConfig.ExitCode = exitStatus + ExecConfig.Running = false return exitStatus, err } @@ -303,3 +325,67 @@ func (d *Daemon) containerExecIds() map[string]struct{} { } return ids } + +func (d *Daemon) containerExec(container *Container, ec *ExecConfig) error { + container.Lock() + defer container.Unlock() + + callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error { + if processConfig.Tty { + // The callback is called after the process Start() + // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave + // which we close here. 
+ if c, ok := processConfig.Stdout.(io.Closer); ok { + c.Close() + } + } + close(ec.waitStart) + return nil + } + + // We use a callback here instead of a goroutine and an chan for + // synchronization purposes + cErr := promise.Go(func() error { return d.monitorExec(container, ec, callback) }) + + // Exec should not return until the process is actually running + select { + case <-ec.waitStart: + case err := <-cErr: + return err + } + + return nil +} + +func (d *Daemon) monitorExec(container *Container, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error { + var ( + err error + exitCode int + ) + pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin) + exitCode, err = d.Exec(container, ExecConfig, pipes, callback) + if err != nil { + logrus.Errorf("Error running command in existing container %s: %s", container.ID, err) + } + logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode) + if ExecConfig.OpenStdin { + if err := ExecConfig.streamConfig.stdin.Close(); err != nil { + logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err) + } + } + if err := ExecConfig.streamConfig.stdout.Clean(); err != nil { + logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err) + } + if err := ExecConfig.streamConfig.stderr.Clean(); err != nil { + logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err) + } + if ExecConfig.ProcessConfig.Terminal != nil { + if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil { + logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err) + } + } + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. + container.execCommands.Delete(ExecConfig.ID) + return err +} diff --git a/vendor/github.com/docker/docker/daemon/exec_freebsd.go b/vendor/github.com/docker/docker/daemon/exec_freebsd.go deleted file mode 100644 index 6f1e5e3c..00000000 --- a/vendor/github.com/docker/docker/daemon/exec_freebsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build freebsd - -package daemon - -// checkExecSupport returns an error if the exec driver does not support exec, -// or nil if it is supported. -func checkExecSupport(drivername string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go deleted file mode 100644 index a3603263..00000000 --- a/vendor/github.com/docker/docker/daemon/exec_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux - -package daemon - -import ( - "strings" - - "github.com/docker/docker/daemon/execdriver/lxc" -) - -// checkExecSupport returns an error if the exec driver does not support exec, -// or nil if it is supported. -func checkExecSupport(drivername string) error { - if strings.HasPrefix(drivername, lxc.DriverName) { - return lxc.ErrExec - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/exec_windows.go b/vendor/github.com/docker/docker/daemon/exec_windows.go deleted file mode 100644 index d6f244e6..00000000 --- a/vendor/github.com/docker/docker/daemon/exec_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package daemon - -// checkExecSupport returns an error if the exec driver does not support exec, -// or nil if it is supported. 
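Backing up to containerExec above: it blocks until either waitStart is closed (done by the driver callback once the process is running) or the monitor fails early, and promise.Go is the helper that turns a function into an error channel. A hedged sketch of that pattern with invented names:

package main

import (
	"errors"
	"fmt"
)

// goPromise mirrors the promise.Go idea: run fn in a goroutine and
// expose its result as a buffered error channel.
func goPromise(fn func() error) <-chan error {
	ch := make(chan error, 1)
	go func() { ch <- fn() }()
	return ch
}

func main() {
	started := make(chan struct{})
	cErr := goPromise(func() error {
		close(started) // the real code closes waitStart from the start callback
		return errors.New("process exited")
	})

	// Do not return until the process actually started or the runner
	// failed before it could.
	select {
	case <-started:
		fmt.Println("started")
	case err := <-cErr:
		fmt.Println("failed early:", err)
	}
}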
-func checkExecSupport(DriverName string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go index 0286b496..0a246ed5 100644 --- a/vendor/github.com/docker/docker/daemon/export.go +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -1,25 +1,54 @@ package daemon import ( - "fmt" "io" + + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" ) +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { container, err := daemon.Get(name) if err != nil { return err } - data, err := container.Export() + data, err := daemon.containerExport(container) if err != nil { - return fmt.Errorf("%s: %s", name, err) + return derr.ErrorCodeExportFailed.WithArgs(name, err) } defer data.Close() // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(out, data); err != nil { - return fmt.Errorf("%s: %s", name, err) + return derr.ErrorCodeExportFailed.WithArgs(name, err) } return nil } + +func (daemon *Daemon) containerExport(container *Container) (archive.Archive, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archive, err := archive.TarWithOptions(container.basefs, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/vendor/github.com/docker/docker/daemon/history.go b/vendor/github.com/docker/docker/daemon/history.go index f7175087..3fc10da0 100644 --- a/vendor/github.com/docker/docker/daemon/history.go +++ b/vendor/github.com/docker/docker/daemon/history.go @@ -22,10 +22,11 @@ func (history *History) Swap(i, j int) { containers[i], containers[j] = containers[j], containers[i] } +// Add the given container to history. 
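A note on containerExport above: the container must stay mounted for exactly as long as the consumer reads the tar stream, so the archive is wrapped such that Close also unmounts. A standalone sketch of that wrapper pattern, mirroring ioutils.NewReadCloserWrapper:

package main

import (
	"io"
	"strings"
)

// readCloserWrapper lets cleanup piggyback on the consumer's Close.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func wrap(r io.ReadCloser, cleanup func()) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: func() error {
		err := r.Close()
		cleanup() // e.g. daemon.Unmount(container)
		return err
	}}
}

func main() {
	rc := wrap(io.NopCloser(strings.NewReader("data")), func() {})
	defer rc.Close()
	io.Copy(io.Discard, rc)
}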
func (history *History) Add(container *Container) { *history = append(*history, container) } -func (history *History) Sort() { +func (history *History) sort() { sort.Sort(history) } diff --git a/vendor/github.com/docker/docker/daemon/image_delete.go b/vendor/github.com/docker/docker/daemon/image_delete.go index 3929b133..ca8b82a1 100644 --- a/vendor/github.com/docker/docker/daemon/image_delete.go +++ b/vendor/github.com/docker/docker/daemon/image_delete.go @@ -4,177 +4,343 @@ import ( "fmt" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" - "github.com/docker/docker/graph" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/graph/tags" "github.com/docker/docker/image" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/utils" ) -// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ -func (daemon *Daemon) ImageDelete(name string, force, noprune bool) ([]types.ImageDelete, error) { - list := []types.ImageDelete{} - if err := daemon.imgDeleteHelper(name, &list, true, force, noprune); err != nil { - return nil, err - } - if len(list) == 0 { - return nil, fmt.Errorf("Conflict, %s wasn't deleted", name) - } +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. However, if there exists any containers which +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. Following removal of the repository reference, +// the referenced image itself will attempt to be deleted as described below +// but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendent image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. 
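A toy model of the hard/soft conflict rules this comment lays out; the vendored code checks hard conflicts before soft ones, and the "cannot/must be forced" wording matches its error formatting:

package main

import "fmt"

// imageConflict sketches the taxonomy: hard conflicts never yield,
// soft conflicts yield to force.
type imageConflict struct {
	hard    bool
	message string
}

func (c imageConflict) Error() string {
	if c.hard {
		return fmt.Sprintf("conflict (cannot be forced): %s", c.message)
	}
	return fmt.Sprintf("conflict (must be forced): %s", c.message)
}

// checkDelete assumes hard conflicts are ordered before soft ones,
// as the real checks are.
func checkDelete(conflicts []imageConflict, force bool) error {
	for _, c := range conflicts {
		if c.hard || !force {
			return c
		}
	}
	return nil
}

func main() {
	soft := imageConflict{message: "image is referenced in one or more repositories"}
	fmt.Println(checkDelete([]imageConflict{soft}, false)) // blocked
	fmt.Println(checkDelete([]imageConflict{soft}, true))  // <nil>
}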
+func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { + records := []types.ImageDelete{} - return list, nil -} - -func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, first, force, noprune bool) error { - var ( - repoName, tag string - tags = []string{} - ) - repoAndTags := make(map[string][]string) - - // FIXME: please respect DRY and centralize repo+tag parsing in a single central place! -- shykes - repoName, tag = parsers.ParseRepositoryTag(name) - if tag == "" { - tag = graph.DefaultTag - } - - if name == "" { - return fmt.Errorf("Image name can not be blank") - } - - img, err := daemon.Repositories().LookupImage(name) + img, err := daemon.repositories.LookupImage(imageRef) if err != nil { - if r, _ := daemon.Repositories().Get(repoName); r != nil { - return fmt.Errorf("No such image: %s", utils.ImageReference(repoName, tag)) - } - return fmt.Errorf("No such image: %s", name) + return nil, daemon.graphNotExistToErrcode(imageRef, err) } - if strings.Contains(img.ID, name) { - repoName = "" - tag = "" - } - - byParents := daemon.Graph().ByParent() - - repos := daemon.Repositories().ByID()[img.ID] - - //If delete by id, see if the id belong only to one repository - deleteByID := repoName == "" - if deleteByID { - for _, repoAndTag := range repos { - parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag) - if repoName == "" || repoName == parsedRepo { - repoName = parsedRepo - if parsedTag != "" { - repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag) - } - } else if repoName != parsedRepo && !force && first { - // the id belongs to multiple repos, like base:latest and user:test, - // in that case return conflict - return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) - } else { - //the id belongs to multiple repos, with -f just delete all - repoName = parsedRepo - if parsedTag != "" { - repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag) - } + var removedRepositoryRef bool + if !isImageIDPrefix(img.ID, imageRef) { + // A repository reference was given and should be removed + // first. We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !(force || daemon.imageHasMultipleRepositoryReferences(img.ID)) { + if container := daemon.getContainerUsingImage(img.ID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + return nil, derr.ErrorCodeImgDelUsed.WithArgs(imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(img.ID)) } } + + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef} + + daemon.EventsService.Log("untag", img.ID, "") + records = append(records, untaggedRecord) + + // If has remaining references then untag finishes the remove + if daemon.repositories.HasReferences(img) { + return records, nil + } + + removedRepositoryRef = true } else { - repoAndTags[repoName] = append(repoAndTags[repoName], tag) + // If an ID reference was given AND there is exactly one + // repository reference to the image then we will want to + // remove that reference. + // FIXME: Is this the behavior we want? 
+ repoRefs := daemon.repositories.ByID()[img.ID] + if len(repoRefs) == 1 { + parsedRef, err := daemon.removeImageRef(repoRefs[0]) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef} + + daemon.EventsService.Log("untag", img.ID, "") + records = append(records, untaggedRecord) + } } - if !first && len(repoAndTags) > 0 { + return records, daemon.imageDeleteHelper(img, &records, force, prune, removedRepositoryRef) +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + return strings.HasPrefix(imageID, possiblePrefix) +} + +// imageHasMultipleRepositoryReferences returns whether there are multiple +// repository references to the given imageID. +func (daemon *Daemon) imageHasMultipleRepositoryReferences(imageID string) bool { + return len(daemon.repositories.ByID()[imageID]) > 1 +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID string) *Container { + for _, container := range daemon.List() { + if container.ImageID == imageID { + return container + } + } + + return nil +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) { + repository, ref := parsers.ParseRepositoryTag(repositoryRef) + if ref == "" { + ref = tags.DefaultTag + } + + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.repositories.Delete(repository, ref) + + return utils.ImageReference(repository, ref), err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDelete is added to the +// given list of records. +func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]types.ImageDelete) error { + imageRefs := daemon.repositories.ByID()[imgID] + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef} + + daemon.EventsService.Log("untag", imgID, "") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. +type imageDeleteConflict struct { + hard bool + imgID string + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. 
If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + if conflict := daemon.checkImageDeleteConflict(img, force); conflict != nil { + if quiet && !daemon.imageIsDangling(img) { + // Ignore conflicts UNLESS the image is "dangling" in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. + return conflict + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(img.ID, records); err != nil { + return err + } + + if err := daemon.Graph().Delete(img.ID); err != nil { + return err + } + + daemon.EventsService.Log("delete", img.ID, "") + *records = append(*records, types.ImageDelete{Deleted: img.ID}) + + if !prune || img.Parent == "" { return nil } - if len(repos) <= 1 || deleteByID { - if err := daemon.canDeleteImage(img.ID, force); err != nil { - return err - } + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + parentImg, err := daemon.Graph().Get(img.Parent) + if err != nil { + return derr.ErrorCodeImgNoParent.WithArgs(err) } - // Untag the current image - for repoName, tags := range repoAndTags { - for _, tag := range tags { - tagDeleted, err := daemon.Repositories().Delete(repoName, tag) - if err != nil { - return err - } - if tagDeleted { - *list = append(*list, types.ImageDelete{ - Untagged: utils.ImageReference(repoName, tag), - }) - daemon.EventsService.Log("untag", img.ID, "") - } - } - } - tags = daemon.Repositories().ByID()[img.ID] - if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { - if len(byParents[img.ID]) == 0 { - if err := daemon.Repositories().DeleteAll(img.ID); err != nil { - return err - } - if err := daemon.Graph().Delete(img.ID); err != nil { - return err - } - *list = append(*list, types.ImageDelete{ - Deleted: img.ID, - }) - daemon.EventsService.Log("delete", img.ID, "") - if img.Parent != "" && !noprune { - err := daemon.imgDeleteHelper(img.Parent, list, false, force, noprune) - if first { - return err - } - - } - - } - } - return nil + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). 
+ return daemon.imageDeleteHelper(parentImg, records, false, true, true) } -func (daemon *Daemon) canDeleteImage(imgID string, force bool) error { - if daemon.Graph().IsHeld(imgID) { - return fmt.Errorf("Conflict, cannot delete because %s is held by an ongoing pull or build", stringid.TruncateID(imgID)) +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. +func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict { + // Check for hard conflicts first. + if conflict := daemon.checkImageDeleteHardConflict(img); conflict != nil { + return conflict } + + // Then check for soft conflicts. + if ignoreSoftConflicts { + // Don't bother checking for soft conflicts. + return nil + } + + return daemon.checkImageDeleteSoftConflict(img) +} + +func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDeleteConflict { + // Check if the image ID is being used by a pull or build. + if daemon.Graph().IsHeld(img.ID) { + return &imageDeleteConflict{ + hard: true, + imgID: img.ID, + message: "image is held by an ongoing pull or build", + } + } + + // Check if the image has any descendent images. + if daemon.Graph().HasChildren(img.ID) { + return &imageDeleteConflict{ + hard: true, + imgID: img.ID, + message: "image has dependent child images", + } + } + + // Check if any running container is using the image. for _, container := range daemon.List() { - if container.ImageID == "" { - // This technically should never happen, but if the container - // has no ImageID then log the situation and move on. - // If we allowed processing to continue then the code later - // on would fail with a "Prefix can't be empty" error even - // though the bad container has nothing to do with the image - // we're trying to delete. - logrus.Errorf("Container %q has no image associated with it!", container.ID) + if !container.IsRunning() { + // Skip this until we check for soft conflicts later. 
continue } - parent, err := daemon.Repositories().LookupImage(container.ImageID) - if err != nil { - if daemon.Graph().IsNotExist(err, container.ImageID) { - continue - } - return err - } - if err := daemon.graph.WalkHistory(parent, func(p image.Image) error { - if imgID == p.ID { - if container.IsRunning() { - if force { - return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", stringid.TruncateID(imgID), stringid.TruncateID(container.ID)) - } - return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID)) - } else if !force { - return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID)) - } + if container.ImageID == img.ID { + return &imageDeleteConflict{ + imgID: img.ID, + hard: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), } - return nil - }); err != nil { - return err } } + return nil } + +func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDeleteConflict { + // Check if any repository tags/digest reference this image. + if daemon.repositories.HasReferences(img) { + return &imageDeleteConflict{ + imgID: img.ID, + message: "image is referenced in one or more repositories", + } + } + + // Check if any stopped containers reference this image. + for _, container := range daemon.List() { + if container.IsRunning() { + // Skip this as it was checked above in hard conflict conditions. + continue + } + + if container.ImageID == img.ID { + return &imageDeleteConflict{ + imgID: img.ID, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(img *image.Image) bool { + return !(daemon.repositories.HasReferences(img) || daemon.Graph().HasChildren(img.ID)) +} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go index 387c3995..2ef7b668 100644 --- a/vendor/github.com/docker/docker/daemon/info.go +++ b/vendor/github.com/docker/docker/daemon/info.go @@ -7,15 +7,17 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" - "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) +// SystemInfo returns information about the host server the daemon is running on. 
func (daemon *Daemon) SystemInfo() (*types.Info, error) { images := daemon.Graph().Map() var imgcount int @@ -49,22 +51,27 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { logrus.Errorf("Could not read system memory info: %v", err) } - // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) + // if we still have the original dockerinit binary from before + // we copied it locally, let's return the path to that, since + // that's more intuitive (the copied path is trivial to derive + // by hand given VERSION) initPath := utils.DockerInitPath("") if initPath == "" { // if that fails, we'll just return the path from the daemon - initPath = daemon.SystemInitPath() + initPath = daemon.systemInitPath() } + sysInfo := sysinfo.New(true) + v := &types.Info{ ID: daemon.ID, Containers: len(daemon.List()), Images: imgcount, Driver: daemon.GraphDriver().String(), DriverStatus: daemon.GraphDriver().Status(), - IPv4Forwarding: !daemon.SystemConfig().IPv4ForwardingDisabled, - BridgeNfIptables: !daemon.SystemConfig().BridgeNfCallIptablesDisabled, - BridgeNfIp6tables: !daemon.SystemConfig().BridgeNfCallIp6tablesDisabled, + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNfCallIptablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNfCallIP6tablesDisabled, Debug: os.Getenv("DEBUG") != "", NFd: fileutils.GetTotalUsedFds(), NGoroutines: runtime.NumGoroutine(), @@ -76,13 +83,16 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { OperatingSystem: operatingSystem, IndexServerAddress: registry.IndexServer, RegistryConfig: daemon.RegistryService.Config, - InitSha1: dockerversion.INITSHA1, + InitSha1: dockerversion.InitSHA1, InitPath: initPath, NCPU: runtime.NumCPU(), MemTotal: meminfo.MemTotal, - DockerRootDir: daemon.Config().Root, - Labels: daemon.Config().Labels, + DockerRootDir: daemon.config().Root, + Labels: daemon.config().Labels, ExperimentalBuild: utils.ExperimentalBuild(), + ServerVersion: dockerversion.Version, + ClusterStore: daemon.config().ClusterStore, + ClusterAdvertise: daemon.config().ClusterAdvertise, } // TODO Windows. Refactor this more once sysinfo is refactored into @@ -90,18 +100,18 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if // an attempt is made to access through them. 
if runtime.GOOS != "windows" { - v.MemoryLimit = daemon.SystemConfig().MemoryLimit - v.SwapLimit = daemon.SystemConfig().SwapLimit - v.OomKillDisable = daemon.SystemConfig().OomKillDisable - v.CpuCfsPeriod = daemon.SystemConfig().CpuCfsPeriod - v.CpuCfsQuota = daemon.SystemConfig().CpuCfsQuota + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota } if httpProxy := os.Getenv("http_proxy"); httpProxy != "" { - v.HttpProxy = httpProxy + v.HTTPProxy = httpProxy } if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" { - v.HttpsProxy = httpsProxy + v.HTTPSProxy = httpsProxy } if noProxy := os.Getenv("no_proxy"); noProxy != "" { v.NoProxy = noProxy diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go index df909e18..3107b3bd 100644 --- a/vendor/github.com/docker/docker/daemon/inspect.go +++ b/vendor/github.com/docker/docker/daemon/inspect.go @@ -5,9 +5,14 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/daemon/network" ) -func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) { +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. +func (daemon *Daemon) ContainerInspect(name string, size bool) (*types.ContainerJSON, error) { container, err := daemon.Get(name) if err != nil { return nil, err @@ -16,21 +21,74 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error container.Lock() defer container.Unlock() - base, err := daemon.getInspectData(container) + base, err := daemon.getInspectData(container, size) if err != nil { return nil, err } mountPoints := addMountPoints(container) + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + Ports: container.NetworkSettings.Ports, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: container.NetworkSettings.Networks, + } - return &types.ContainerJSON{base, mountPoints, container.Config}, nil + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil } -func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) { +// ContainerInspect120 serializes the master version of a container into a json type. 
+func (daemon *Daemon) ContainerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.Get(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.hostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.ContainerJSONBase, error) { // make a copy to play with hostConfig := *container.hostConfig - if children, err := daemon.Children(container.Name); err == nil { + if children, err := daemon.children(container.Name); err == nil { for linkAlias, child := range children { hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) } @@ -38,10 +96,15 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON // we need this trick to preserve empty log driver, so // container will use daemon defaults even if daemon change them if hostConfig.LogConfig.Type == "" { - hostConfig.LogConfig = daemon.defaultLogConfig + hostConfig.LogConfig.Type = daemon.defaultLogConfig.Type + } + + if len(hostConfig.LogConfig.Config) == 0 { + hostConfig.LogConfig.Config = daemon.defaultLogConfig.Config } containerState := &types.ContainerState{ + Status: container.State.StateString(), Running: container.State.Running, Paused: container.State.Paused, Restarting: container.State.Restarting, @@ -55,22 +118,30 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON } contJSONBase := &types.ContainerJSONBase{ - Id: container.ID, - Created: container.Created.Format(time.RFC3339Nano), - Path: container.Path, - Args: container.Args, - State: containerState, - Image: container.ImageID, - NetworkSettings: container.NetworkSettings, - LogPath: container.LogPath, - Name: container.Name, - RestartCount: container.RestartCount, - Driver: container.Driver, - ExecDriver: container.ExecDriver, - MountLabel: container.MountLabel, - ProcessLabel: container.ProcessLabel, - ExecIDs: container.GetExecIDs(), - HostConfig: &hostConfig, + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID, + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.getExecIDs(), + HostConfig: &hostConfig, + } + + var ( + sizeRw int64 + sizeRootFs int64 + ) + if size { + sizeRw, sizeRootFs = daemon.getSize(container) + contJSONBase.SizeRw = &sizeRw + contJSONBase.SizeRootFs = &sizeRootFs } // Now set any platform-specific fields @@ -86,10 +157,59 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON return contJSONBase, nil } -func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) { +// ContainerExecInspect returns low-level 
information about the exec +// command. An error is returned if the exec cannot be found. +func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) { eConfig, err := daemon.getExecConfig(id) if err != nil { return nil, err } return eConfig, nil } + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + return volumeToAPIType(v), nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. +func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_unix.go b/vendor/github.com/docker/docker/daemon/inspect_unix.go index 7d326543..d445660f 100644 --- a/vendor/github.com/docker/docker/daemon/inspect_unix.go +++ b/vendor/github.com/docker/docker/daemon/inspect_unix.go @@ -2,7 +2,10 @@ package daemon -import "github.com/docker/docker/api/types" +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p19" +) // This sets platform-specific fields func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { @@ -14,7 +17,8 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type return contJSONBase } -func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) { +// ContainerInspectPre120 gets containers for pre 1.20 APIs. 
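The v1p19/v1p20 packages used by these inspect hunks version the API payload by embedding a shared base struct, as the function that follows does with ContainerJSONBase. A rough self-contained illustration of that embedding pattern, with invented field names rather than the actual API types:

package main

import (
	"encoding/json"
	"fmt"
)

// base plays the role of types.ContainerJSONBase: fields shared by
// every API version.
type base struct {
	ID   string
	Name string
}

// v1p19JSON mimics the pre-1.20 shape, which flattened resource fields
// into the top-level object.
type v1p19JSON struct {
	base
	Memory int64
}

// currentJSON mimics the modern shape, which nests resources instead.
type currentJSON struct {
	base
	HostConfig struct{ Memory int64 }
}

func main() {
	b := base{ID: "abc123", Name: "/web"}
	old, _ := json.Marshal(v1p19JSON{base: b, Memory: 512})
	cur, _ := json.Marshal(currentJSON{base: b})
	fmt.Println(string(old)) // {"ID":"abc123","Name":"/web","Memory":512}
	fmt.Println(string(cur)) // {"ID":"abc123","Name":"/web","HostConfig":{"Memory":0}}
}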
+func (daemon *Daemon) ContainerInspectPre120(name string) (*v1p19.ContainerJSON, error) { container, err := daemon.Get(name) if err != nil { return nil, err @@ -23,7 +27,7 @@ func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONP container.Lock() defer container.Unlock() - base, err := daemon.getInspectData(container) + base, err := daemon.getInspectData(container, false) if err != nil { return nil, err } @@ -35,15 +39,26 @@ func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONP volumesRW[m.Destination] = m.RW } - config := &types.ContainerConfig{ - container.Config, - container.hostConfig.Memory, - container.hostConfig.MemorySwap, - container.hostConfig.CPUShares, - container.hostConfig.CpusetCpus, + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.hostConfig.VolumeDriver, + Memory: container.hostConfig.Memory, + MemorySwap: container.hostConfig.MemorySwap, + CPUShares: container.hostConfig.CPUShares, + CPUSet: container.hostConfig.CpusetCpus, } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) - return &types.ContainerJSONPre120{base, volumes, volumesRW, config}, nil + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil } func addMountPoints(container *Container) []types.MountPoint { diff --git a/vendor/github.com/docker/docker/daemon/inspect_windows.go b/vendor/github.com/docker/docker/daemon/inspect_windows.go index c5baa138..26b38616 100644 --- a/vendor/github.com/docker/docker/daemon/inspect_windows.go +++ b/vendor/github.com/docker/docker/daemon/inspect_windows.go @@ -8,5 +8,20 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type } func addMountPoints(container *Container) []types.MountPoint { - return nil + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +// ContainerInspectPre120 get containers for pre 1.20 APIs. 
+func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSON, error) { + return daemon.ContainerInspect(name, false) } diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go index 7a4d9ce8..8b94fa3a 100644 --- a/vendor/github.com/docker/docker/daemon/kill.go +++ b/vendor/github.com/docker/docker/daemon/kill.go @@ -1,6 +1,15 @@ package daemon -import "syscall" +import ( + "fmt" + "runtime" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/signal" +) // ContainerKill send signal to the container // If no signal is given (sig 0), then Kill with SIGKILL and wait @@ -12,16 +21,102 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error { return err } + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { - if err := container.Kill(); err != nil { + if err := daemon.Kill(container); err != nil { return err } } else { // Otherwise, just send the requested signal - if err := container.KillSig(int(sig)); err != nil { + if err := daemon.killWithSignal(container, int(sig)); err != nil { return err } } return nil } + +// killWithSignal sends the container the given signal. This wrapper for the +// host specific kill command prepares the container before attempting +// to send the signal. An error is returned if the container is paused +// or not running, or if there is a problem returned from the +// underlying kill command. +func (daemon *Daemon) killWithSignal(container *Container, sig int) error { + logrus.Debugf("Sending %d to %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return derr.ErrorCodeUnpauseContainer.WithArgs(container.ID) + } + + if !container.Running { + return derr.ErrorCodeNotRunning.WithArgs(container.ID) + } + + container.ExitOnNext() + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on it's next event + // loop is enough + if container.Restarting { + return nil + } + + if err := daemon.kill(container, sig); err != nil { + return err + } + + daemon.LogContainerEvent(container, "kill") + return nil +} + +// Kill forcefully terminates a container. +func (daemon *Daemon) Kill(container *Container) error { + if !container.IsRunning() { + return derr.ErrorCodeNotRunning.WithArgs(container.ID) + } + + // 1. Send SIGKILL + if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // its probably because its already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be exec driver specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we return it to the caller. 
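The grace-period logic this comment introduces continues below. As a reading aid, here is a compressed, self-contained sketch of the overall escalation (SIGKILL, tolerate a possibly already-dead process, then a direct kill as last resort); proc and its methods are invented stand-ins:

package main

import (
	"fmt"
	"time"
)

// proc stands in for a container process we can signal and wait on.
type proc struct{ alive bool }

func (p *proc) signalKill() error { p.alive = false; return nil } // send SIGKILL

// waitStop reports whether the process stopped within the timeout;
// the real WaitStop blocks, this stub just checks the flag.
func (p *proc) waitStop(timeout time.Duration) bool { return !p.alive }

// kill mirrors Daemon.Kill: the SIGKILL error is only propagated if the
// process is still alive after a short grace period, and a direct kill
// is the last resort before the final unbounded wait.
func kill(p *proc) error {
	if err := p.signalKill(); err != nil {
		if p.alive && !p.waitStop(2*time.Second) {
			return err // the error was real, not just "already dead"
		}
	}
	// last resort in the vendored code: killProcessDirectly(container)
	if !p.waitStop(-1) {
		return fmt.Errorf("process survived SIGKILL")
	}
	return nil
}

func main() {
	fmt.Println(kill(&proc{alive: true})) // <nil>
}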
+ + if container.IsRunning() { + container.WaitStop(2 * time.Second) + if container.IsRunning() { + return err + } + } + } + + // 2. Wait for the process to die, in last resort, try to kill the process directly + if err := killProcessDirectly(container); err != nil { + return err + } + + container.WaitStop(-1 * time.Second) + return nil +} + +// killPossiblyDeadProcess is a wrapper around killWithSignal(), suppressing "no such process" errors. +func (daemon *Daemon) killPossiblyDeadProcess(container *Container, sig int) error { + err := daemon.killWithSignal(container, sig) + if err == syscall.ESRCH { + logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPID(), sig) + return nil + } + return err +} diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go index 5c8315cb..b91e69a7 100644 --- a/vendor/github.com/docker/docker/daemon/list.go +++ b/vendor/github.com/docker/docker/daemon/list.go @@ -6,41 +6,135 @@ import ( "strconv" "strings" + "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/graph" + "github.com/docker/docker/image" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/parsers/filters" ) +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize for the API. +type containerReducer func(*Container, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + // List returns an array of all containers registered in the daemon. func (daemon *Daemon) List() []*Container { return daemon.containers.List() } +// ContainersConfig is the filtering specified by the user to iterate over containers. type ContainersConfig struct { - All bool - Since string - Before string - Limit int - Size bool + // if true show all containers, otherwise only running containers. + All bool + // show all containers created after this container id + Since string + // show all containers created before this container id + Before string + // number of containers to return at most + Limit int + // if true include the sizes of the containers + Size bool + // return only containers that match filters Filters string } +// listContext is the daemon-generated filtering to iterate over containers. +// This is created based on the user specification.
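The listing rewrite above replaces one large inline closure with a small reducer pipeline: classify each container (include, exclude, or stop), then fold the survivors into API objects. A generic, self-contained sketch of that iteration shape over plain ints, with action names mirroring the constants introduced above:

package main

import "fmt"

type action int

const (
	include action = iota
	exclude
	stop
)

// reduce mirrors reduceContainers: classify every item, skip excluded
// ones, and treat stop as a clean break instead of an error.
func reduce(items []int, classify func(int) action) []int {
	var out []int
	for _, it := range items {
		switch classify(it) {
		case exclude:
			continue
		case stop:
			return out
		}
		out = append(out, it)
	}
	return out
}

func main() {
	evenUntilTen := func(n int) action {
		switch {
		case n >= 10:
			return stop
		case n%2 != 0:
			return exclude
		}
		return include
	}
	fmt.Println(reduce([]int{1, 2, 4, 7, 8, 10, 12}, evenUntilTen)) // [2 4 8]
}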
+type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[string]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + // beforeContainer is a filter to ignore containers that appear before the one given + beforeContainer *Container + // sinceContainer is a filter to stop the filtering when the iterator arrives at the given container + sinceContainer *Container + // ContainersConfig is the filters set by the user + *ContainersConfig +} + +// Containers returns the list of containers to show given the user's filtering. func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) { - var ( - foundBefore bool - displayed int - all = config.All - n = config.Limit - psFilters filters.Args - filtExited []int - ) + return daemon.reduceContainers(config, daemon.transformContainer) +} + +// reduceContainers parses the user filtering and generates the list of containers to return based on a reducer. +func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer containerReducer) ([]*types.Container, error) { containers := []*types.Container{} + ctx, err := daemon.foldFilter(config) + if err != nil { + return nil, err + } + + for _, container := range daemon.List() { + t, err := daemon.reducePsContainer(container, ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + return containers, nil +} + +// reducePsContainer reduces a container to the representation expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { + container.Lock() + defer container.Unlock() + + // filter containers to return + action := includeContainerInList(container, ctx) + switch action { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + return reducer(container, ctx) +} + +// foldFilter generates the container filter based on the user's filtering options. +func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) { psFilters, err := filters.FromParam(config.Filters) if err != nil { return nil, err } + + var filtExited []int if i, ok := psFilters["exited"]; ok { for _, value := range i { code, err := strconv.Atoi(value) @@ -57,12 +151,35 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, return nil, errors.New("Unrecognised filter value for status") } if value == "exited" || value == "created" { - all = true + config.All = true } } } - names := map[string][]string{} - daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { + + imagesFilter := map[string]bool{} + var ancestorFilter bool + if ancestors, ok := psFilters["ancestor"]; ok { + ancestorFilter = true + byParents := daemon.Graph().ByParent() + // The idea is to walk the graph down the most "efficient" way.
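The ancestor filter set up here is completed by populateImageFilterByParents at the bottom of this file: a depth-first walk over the parent-to-children index that marks every descendant image. The same recursion in a standalone form, with string IDs and invented data:

package main

import "fmt"

// markDescendants mirrors populateImageFilterByParents: guard against
// revisits, recurse into every child, then mark the current image.
func markDescendants(seen map[string]bool, id string, children map[string][]string) {
	if seen[id] {
		return
	}
	for _, child := range children[id] {
		markDescendants(seen, child, children)
	}
	seen[id] = true
}

func main() {
	children := map[string][]string{
		"base":   {"middle"},
		"middle": {"app1", "app2"},
	}
	seen := map[string]bool{}
	markDescendants(seen, "base", children)
	fmt.Println(len(seen)) // 4: base, middle, app1, app2
}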
+ for _, ancestor := range ancestors { + // First, get the imageId of the ancestor filter (yay) + image, err := daemon.repositories.LookupImage(ancestor) + if err != nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + continue + } + if imagesFilter[ancestor] { + // Already seen this ancestor, skip it + continue + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, image.ID, byParents) + } + } + + names := make(map[string][]string) + daemon.containerGraph().Walk("/", func(p string, e *graphdb.Entity) error { names[e.ID()] = append(names[e.ID()], p) return nil }, 1) @@ -82,124 +199,228 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, } } - errLast := errors.New("last container") - writeCont := func(container *Container) error { - container.Lock() - defer container.Unlock() - if !container.Running && !all && n <= 0 && config.Since == "" && config.Before == "" { - return nil - } - if !psFilters.Match("name", container.Name) { - return nil - } + return &listContext{ + filters: psFilters, + ancestorFilter: ancestorFilter, + names: names, + images: imagesFilter, + exitAllowed: filtExited, + beforeContainer: beforeCont, + sinceContainer: sinceCont, + ContainersConfig: config, + }, nil +} - if !psFilters.Match("id", container.ID) { - return nil - } - - if !psFilters.MatchKVList("label", container.Config.Labels) { - return nil - } - - if config.Before != "" && !foundBefore { - if container.ID == beforeCont.ID { - foundBefore = true - } - return nil - } - if n > 0 && displayed == n { - return errLast - } - if config.Since != "" { - if container.ID == sinceCont.ID { - return errLast - } - } - if len(filtExited) > 0 { - shouldSkip := true - for _, code := range filtExited { - if code == container.ExitCode && !container.Running { - shouldSkip = false - break - } - } - if shouldSkip { - return nil - } - } - - if !psFilters.Match("status", container.State.StateString()) { - return nil - } - displayed++ - newC := &types.Container{ - ID: container.ID, - Names: names[container.ID], - } - newC.Image = container.Config.Image - if len(container.Args) > 0 { - args := []string{} - for _, arg := range container.Args { - if strings.Contains(arg, " ") { - args = append(args, fmt.Sprintf("'%s'", arg)) - } else { - args = append(args, arg) - } - } - argsAsString := strings.Join(args, " ") - - newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) - } else { - newC.Command = fmt.Sprintf("%s", container.Path) - } - newC.Created = int(container.Created.Unix()) - newC.Status = container.State.String() - newC.HostConfig.NetworkMode = string(container.HostConfig().NetworkMode) - - newC.Ports = []types.Port{} - for port, bindings := range container.NetworkSettings.Ports { - p, err := nat.ParsePort(port.Port()) - if err != nil { - return err - } - if len(bindings) == 0 { - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: p, - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - h, err := nat.ParsePort(binding.HostPort) - if err != nil { - return err - } - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: p, - PublicPort: h, - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - - if config.Size { - sizeRw, sizeRootFs := container.GetSize() - newC.SizeRw = int(sizeRw) - newC.SizeRootFs = int(sizeRootFs) - } - newC.Labels = container.Config.Labels - containers = append(containers, newC) - return nil +// includeContainerInList decides whether a containers 
should be included in the output based on the filter. +// It also decides if the iteration should be stopped or not. +func includeContainerInList(container *Container, ctx *listContext) iterationAction { + // Do not include container if it's stopped and we aren't filtering + if !container.Running && !ctx.All && ctx.Limit <= 0 && ctx.beforeContainer == nil && ctx.sinceContainer == nil { + return excludeContainer } - for _, container := range daemon.List() { - if err := writeCont(container); err != nil { - if err != errLast { + // Do not include container if the name doesn't match + if !ctx.filters.Match("name", container.Name) { + return excludeContainer + } + + // Do not include container if the id doesn't match + if !ctx.filters.Match("id", container.ID) { + return excludeContainer + } + + // Do not include container if any of the labels don't match + if !ctx.filters.MatchKVList("label", container.Config.Labels) { + return excludeContainer + } + + // Do not include container if the isolation mode doesn't match + if excludeContainer == excludeByIsolation(container, ctx) { + return excludeContainer + } + + // Do not include container if it's in the list before the filter container. + // Set the filter container to nil to include the rest of containers after this one. + if ctx.beforeContainer != nil { + if container.ID == ctx.beforeContainer.ID { + ctx.beforeContainer = nil + } + return excludeContainer + } + + // Stop iteration when the index is over the limit + if ctx.Limit > 0 && ctx.idx == ctx.Limit { + return stopIteration + } + + // Stop iteration when the container arrives at the filter container + if ctx.sinceContainer != nil { + if container.ID == ctx.sinceContainer.ID { + return stopIteration + } + } + + // Do not include container if its exit code is not in the filter + if len(ctx.exitAllowed) > 0 { + shouldSkip := true + for _, code := range ctx.exitAllowed { + if code == container.ExitCode && !container.Running { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + // Do not include container if its status doesn't match the filter + if !ctx.filters.Match("status", container.State.StateString()) { + return excludeContainer + } + + if ctx.ancestorFilter { + if len(ctx.images) == 0 { + return excludeContainer + } + if !ctx.images[container.ImageID] { + return excludeContainer + } + } + + return includeContainer +} + +func getImage(s *graph.TagStore, img, imgID string) (string, error) { + // both Image and ImageID are actually IDs, nothing to guess + if strings.HasPrefix(imgID, img) { + return img, nil + } + id, err := s.GetID(img) + if err != nil { + if err == graph.ErrNameIsNotExist { + return imgID, nil + } + return "", err + } + if id != imgID { + return imgID, nil + } + return img, nil +} + +// transformContainer generates the container type expected by the docker ps command.
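transformContainer, whose body follows, also flattens the nat.PortMap into the API's flat port list: one entry per host binding, plus a bare entry for exposed-but-unbound ports. Sketched standalone with simplified stand-in types:

package main

import "fmt"

// binding stands in for nat.PortBinding.
type binding struct{ hostIP, hostPort string }

// port stands in for types.Port.
type port struct{ private, public, ip string }

// flatten mirrors the loop in transformContainer: a bare entry when a
// private port has no bindings, otherwise one entry per binding.
func flatten(ports map[string][]binding) []port {
	out := []port{}
	for key, bs := range ports {
		if len(bs) == 0 {
			out = append(out, port{private: key})
			continue
		}
		for _, b := range bs {
			out = append(out, port{private: key, public: b.hostPort, ip: b.hostIP})
		}
	}
	return out
}

func main() {
	fmt.Println(flatten(map[string][]binding{
		"80/tcp":   {{hostIP: "0.0.0.0", hostPort: "8080"}},
		"9000/tcp": {},
	}))
}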
+func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID, + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + showImg, err := getImage(daemon.repositories, container.Config.Image, container.ImageID) + if err != nil { + return nil, err + } + newC.Image = showImg + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.hostConfig.NetworkMode) + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { return nil, err } - break + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + PublicPort: h, + Type: port.Proto(), + IP: binding.HostIP, + }) } } - return containers, nil + + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(container) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + newC.Labels = container.Config.Labels + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. +func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) { + var volumesOut []*types.Volume + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, err + } + + filterUsed := false + if i, ok := volFilters["dangling"]; ok { + if len(i) > 1 { + return nil, derr.ErrorCodeDanglingOne + } + + filterValue := i[0] + if strings.ToLower(filterValue) == "true" || filterValue == "1" { + filterUsed = true + } + } + + volumes := daemon.volumes.List() + for _, v := range volumes { + if filterUsed && daemon.volumes.Count(v) > 0 { + continue + } + volumesOut = append(volumesOut, volumeToAPIType(v)) + } + return volumesOut, nil +} + +func populateImageFilterByParents(ancestorMap map[string]bool, imageID string, byParents map[string][]*image.Image) { + if !ancestorMap[imageID] { + if images, ok := byParents[imageID]; ok { + for _, image := range images { + populateImageFilterByParents(ancestorMap, image.ID, byParents) + } + } + ancestorMap[imageID] = true + } } diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go new file mode 100644 index 00000000..156e12d9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd + +package daemon + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
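The two new files that follow are a textbook use of Go build constraints: each platform compiles exactly one definition of excludeByIsolation, so callers need no runtime GOOS switch. The same pattern reduced to a toy pair of files (hypothetical names):

// isolation_unix.go
// +build linux freebsd

package main

// isolationSupported is the Unix definition: isolation is a Windows-only
// concept, so this build always reports false.
func isolationSupported() bool { return false }

// isolation_windows.go
// +build windows

package main

// isolationSupported is the Windows definition of the same symbol.
func isolationSupported() bool { return true }

Exactly one of the two files is compiled into any given build, which is why both may declare the same function without a conflict.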
+func excludeByIsolation(container *Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/list_windows.go b/vendor/github.com/docker/docker/daemon/list_windows.go new file mode 100644 index 00000000..d14bf8ca --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_windows.go @@ -0,0 +1,16 @@ +package daemon + +import "strings" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so it is implemented here. +func excludeByIsolation(container *Container, ctx *listContext) iterationAction { + i := strings.ToLower(string(container.hostConfig.Isolation)) + if i == "" { + i = "default" + } + if !ctx.filters.Match("isolation", i) { + return excludeContainer + } + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go index ded4d27d..0abc6269 100644 --- a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go +++ b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go @@ -1,11 +1,13 @@ package daemon -// Importing packages here only to make sure their init gets called and -// therefore they register themselves to the logdriver factory. import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" _ "github.com/docker/docker/daemon/logger/fluentd" _ "github.com/docker/docker/daemon/logger/gelf" _ "github.com/docker/docker/daemon/logger/journald" _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/splunk" _ "github.com/docker/docker/daemon/logger/syslog" ) diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go index 5dcbe718..3b4a1f3a 100644 --- a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go +++ b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go @@ -1,7 +1,8 @@ package daemon -// Importing packages here only to make sure their init gets called and -// therefore they register themselves to the logdriver factory. import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" _ "github.com/docker/docker/daemon/logger/jsonfilelog" ) diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go index e032c571..f3fa866a 100644 --- a/vendor/github.com/docker/docker/daemon/logs.go +++ b/vendor/github.com/docker/docker/daemon/logs.go @@ -1,28 +1,44 @@ package daemon import ( - "fmt" "io" "strconv" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/stdcopy" ) +// ContainerLogsConfig holds configs for logging operations. Exists +// for users of the daemon to pass it a logging configuration.
type ContainerLogsConfig struct { - Follow, Timestamps bool - Tail string - Since time.Time + // if true stream log output + Follow bool + // if true include timestamps for each line of log output + Timestamps bool + // return that many lines of log output from the end + Tail string + // filter logs by returning only those entries after this time + Since time.Time + // whether or not to show stdout and stderr as well as log entries. UseStdout, UseStderr bool OutStream io.Writer Stop <-chan bool } -func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error { +// ContainerLogs hooks up a container's stdout and stderr streams +// configured with the given struct. +func (daemon *Daemon) ContainerLogs(containerName string, config *ContainerLogsConfig) error { + container, err := daemon.Get(containerName) + if err != nil { + return derr.ErrorCodeNoSuchContainer.WithArgs(containerName) + } + if !(config.UseStdout || config.UseStderr) { - return fmt.Errorf("You must choose at least one stream") + return derr.ErrorCodeNeedStream } outStream := config.OutStream @@ -31,8 +47,9 @@ func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsC errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } + config.OutStream = outStream - cLog, err := container.getLogger() + cLog, err := daemon.getLogger(container) if err != nil { return err } @@ -81,3 +98,42 @@ func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsC } } } + +func (daemon *Daemon) getLogger(container *Container) (logger.Logger, error) { + if container.logDriver != nil && container.IsRunning() { + return container.logDriver, nil + } + cfg := container.getLogConfig(daemon.defaultLogConfig) + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return nil, err + } + return container.StartLogger(cfg) +} + +// StartLogging initializes and starts the container logging stream.
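getLogger above encodes a small but important fallback: reuse the live driver of a running container, otherwise validate the configured type and start a fresh one. A standalone sketch of that decision, where the validation list is invented (the real code delegates to logger.ValidateLogOpts):

package main

import (
	"errors"
	"fmt"
)

type driver struct{ kind string }

// getLogger mirrors the daemon helper: a running container keeps its
// already-attached driver; anything else gets a validated fresh one.
func getLogger(running bool, live *driver, cfgType string) (*driver, error) {
	if live != nil && running {
		return live, nil
	}
	switch cfgType { // stand-in for logger.ValidateLogOpts
	case "json-file", "syslog":
		return &driver{kind: cfgType}, nil
	}
	return nil, errors.New("unknown log driver: " + cfgType)
}

func main() {
	d, err := getLogger(false, nil, "json-file")
	fmt.Println(d.kind, err) // json-file <nil>
}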
+func (daemon *Daemon) StartLogging(container *Container) error { + cfg := container.getLogConfig(daemon.defaultLogConfig) + if cfg.Type == "none" { + return nil // do not start logging routines + } + + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return err + } + l, err := container.StartLogger(cfg) + if err != nil { + return derr.ErrorCodeInitLogger.WithArgs(err) + } + + copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.logCopier = copier + copier.Run() + container.logDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go index 1f020574..02f594e4 100644 --- a/vendor/github.com/docker/docker/daemon/monitor.go +++ b/vendor/github.com/docker/docker/daemon/monitor.go @@ -3,13 +3,17 @@ package daemon import ( "io" "os/exec" + "strings" "sync" + "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" + derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" ) const ( @@ -17,6 +21,20 @@ const ( loggerCloseTimeout = 10 * time.Second ) +// containerSupervisor defines the interface that a supervisor must implement +type containerSupervisor interface { + // LogContainerEvent generates events related to a given container + LogContainerEvent(*Container, string) + // Cleanup ensures that the container is properly unmounted + Cleanup(*Container) + // StartLogging starts the logging driver for the container + StartLogging(*Container) error + // Run starts a container + Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) + // IsShuttingDown tells whether the supervisor is shutting down or not + IsShuttingDown() bool +} + // containerMonitor monitors the execution of a container's main process. // If a restart policy is specified for the container the monitor will ensure that the // process is restarted based on the rules of the policy. When the container is finally stopped @@ -25,6 +43,9 @@ const ( type containerMonitor struct { mux sync.Mutex + // supervisor keeps track of the container and the events it generates + supervisor containerSupervisor + // container is the container being monitored container *Container @@ -57,8 +78,9 @@ type containerMonitor struct { // newContainerMonitor returns an initialized containerMonitor for the provided container // honoring the provided restart policy -func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor { +func (daemon *Daemon) newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor { return &containerMonitor{ + supervisor: daemon, container: container, restartPolicy: policy, timeIncrement: defaultTimeIncrement, @@ -86,7 +108,7 @@ func (m *containerMonitor) ExitOnNext() { // unmounts the contatiner's root filesystem func (m *containerMonitor) Close() error { // Cleanup networking and mounts - m.container.cleanup() + m.supervisor.Cleanup(m.container) // FIXME: here is race condition between two RUN instructions in Dockerfile // because they share same runconfig and change image. 
Must be fixed @@ -119,6 +141,10 @@ func (m *containerMonitor) Start() error { } m.Close() }() + // reset stopped flag + if m.container.HasBeenManuallyStopped { + m.container.HasBeenManuallyStopped = false + } // reset the restart count m.container.RestartCount = -1 @@ -126,7 +152,7 @@ func (m *containerMonitor) Start() error { for { m.container.RestartCount++ - if err := m.container.startLogging(); err != nil { + if err := m.supervisor.StartLogging(m.container); err != nil { m.resetContainer(false) return err @@ -134,18 +160,37 @@ func (m *containerMonitor) Start() error { pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin) - m.container.LogEvent("start") + m.logEvent("start") m.lastStartTime = time.Now() - if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil { + if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil { // if we receive an internal error from the initial start of a container then lets // return it instead of entering the restart loop + // set to 127 for container cmd not found/does not exist) + if strings.Contains(err.Error(), "executable file not found") || + strings.Contains(err.Error(), "no such file or directory") || + strings.Contains(err.Error(), "system cannot find the file specified") { + if m.container.RestartCount == 0 { + m.container.ExitCode = 127 + m.resetContainer(false) + return derr.ErrorCodeCmdNotFound + } + } + // set to 126 for container cmd can't be invoked errors + if strings.Contains(err.Error(), syscall.EACCES.Error()) { + if m.container.RestartCount == 0 { + m.container.ExitCode = 126 + m.resetContainer(false) + return derr.ErrorCodeCmdCouldNotBeInvoked + } + } + if m.container.RestartCount == 0 { m.container.ExitCode = -1 m.resetContainer(false) - return err + return derr.ErrorCodeCantStart.WithArgs(m.container.ID, utils.GetErrorMessage(err)) } logrus.Errorf("Error running container: %s", err) @@ -157,11 +202,8 @@ func (m *containerMonitor) Start() error { m.resetMonitor(err == nil && exitStatus.ExitCode == 0) if m.shouldRestart(exitStatus.ExitCode) { - m.container.SetRestarting(&exitStatus) - if exitStatus.OOMKilled { - m.container.LogEvent("oom") - } - m.container.LogEvent("die") + m.container.setRestarting(&exitStatus) + m.logEvent("die") m.resetContainer(true) // sleep with a small time increment between each restart to help avoid issues cased by quickly @@ -175,10 +217,8 @@ func (m *containerMonitor) Start() error { } continue } - if exitStatus.OOMKilled { - m.container.LogEvent("oom") - } - m.container.LogEvent("die") + + m.logEvent("die") m.resetContainer(true) return err } @@ -223,11 +263,12 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool { // do not restart if the user or docker has requested that this container be stopped if m.shouldStop { + m.container.HasBeenManuallyStopped = !m.supervisor.IsShuttingDown() return false } switch { - case m.restartPolicy.IsAlways(): + case m.restartPolicy.IsAlways(), m.restartPolicy.IsUnlessStopped(): return true case m.restartPolicy.IsOnFailure(): // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count @@ -245,7 +286,14 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool { // callback ensures that the container's state is properly updated after we // received ack from the execution drivers -func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int) { +func (m *containerMonitor) 
callback(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error { + go func() { + _, ok := <-chOOM + if ok { + m.logEvent("oom") + } + }() + if processConfig.Tty { // The callback is called after the process Start() // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave @@ -265,9 +313,10 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid close(m.startSignal) } - if err := m.container.ToDisk(); err != nil { + if err := m.container.toDiskLocking(); err != nil { logrus.Errorf("Error saving container to disk: %v", err) } + return nil } // resetContainer resets the container's IO and ensures that the command is able to be executed again @@ -337,3 +386,7 @@ func (m *containerMonitor) resetContainer(lock bool) { SysProcAttr: c.SysProcAttr, } } + +func (m *containerMonitor) logEvent(action string) { + m.supervisor.LogContainerEvent(m.container, action) +} diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go new file mode 100644 index 00000000..9d195fa8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/mounts.go @@ -0,0 +1,46 @@ +package daemon + +import ( + "strings" + + derr "github.com/docker/docker/errors" + "github.com/docker/docker/volume/store" +) + +func (daemon *Daemon) prepareMountPoints(container *Container) error { + for _, config := range container.MountPoints { + if len(config.Driver) > 0 { + v, err := daemon.createVolume(config.Name, config.Driver, nil) + if err != nil { + return err + } + config.Volume = v + } + } + return nil +} + +func (daemon *Daemon) removeMountPoints(container *Container, rm bool) error { + var rmErrors []string + for _, m := range container.MountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Decrement(m.Volume) + if rm { + err := daemon.volumes.Remove(m.Volume) + // ErrVolumeInUse is ignored because having this + // volume being referenced by another container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // when there is another container using the volume. + if err != nil && err != store.ErrVolumeInUse { + rmErrors = append(rmErrors, err.Error()) + } + } + } + if len(rmErrors) > 0 { + return derr.ErrorCodeRemovingVolume.WithArgs(strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/network.go b/vendor/github.com/docker/docker/daemon/network.go new file mode 100644 index 00000000..c1412cec --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network.go @@ -0,0 +1,148 @@ +package daemon + +import ( + "errors" + "fmt" + "net" + "strings" + + "github.com/docker/docker/daemon/network" + "github.com/docker/libnetwork" +) + +const ( + // NetworkByID represents a constant to find a network by its ID + NetworkByID = iota + 1 + // NetworkByName represents a constant to find a network by its Name + NetworkByName +) + +// NetworkControllerEnabled checks if the networking stack is enabled. +// This feature depends on OS primitives and it's disabled in systems like Windows.
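FindNetwork, defined just below, resolves a string first as a name and then as an ID, tolerating only the not-found error in between. The lookup order in a self-contained sketch, with a flat map standing in for the libnetwork controller and invented names:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNoSuchNetwork = errors.New("no such network")

// networks maps ID -> name, standing in for the controller's state.
var networks = map[string]string{"3f1c9a": "bridge", "9ab24d": "backend"}

func byName(name string) (string, error) {
	for id, n := range networks {
		if n == name {
			return id, nil
		}
	}
	return "", errNoSuchNetwork
}

// findNetwork mirrors Daemon.FindNetwork: name first, then ID prefix,
// rejecting ambiguous prefixes the way GetNetwork does for NetworkByID.
func findNetwork(s string) (string, error) {
	id, err := byName(s)
	if err == nil {
		return id, nil
	}
	if err != errNoSuchNetwork {
		return "", err
	}
	var matches []string
	for id := range networks {
		if strings.HasPrefix(id, s) {
			matches = append(matches, id)
		}
	}
	switch len(matches) {
	case 0:
		return "", errNoSuchNetwork
	case 1:
		return matches[0], nil
	}
	return "", errors.New("ambiguous network id: " + s)
}

func main() {
	fmt.Println(findNetwork("backend")) // 9ab24d <nil>
	fmt.Println(findNetwork("3f"))      // 3f1c9a <nil>
}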
+func (daemon *Daemon) NetworkControllerEnabled() bool { + return daemon.netController != nil +} + +// FindNetwork function finds a network for a given string that can represent network name or id +func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { + // Find by Name + n, err := daemon.GetNetwork(idName, NetworkByName) + if _, ok := err.(libnetwork.ErrNoSuchNetwork); err != nil && !ok { + return nil, err + } + + if n != nil { + return n, nil + } + + // Find by id + n, err = daemon.GetNetwork(idName, NetworkByID) + if err != nil { + return nil, err + } + + return n, nil +} + +// GetNetwork function returns a network for a given string that represents the network and +// a hint to indicate if the string is an Id or Name of the network +func (daemon *Daemon) GetNetwork(idName string, by int) (libnetwork.Network, error) { + c := daemon.netController + switch by { + case NetworkByID: + list := daemon.GetNetworksByID(idName) + + if len(list) == 0 { + return nil, libnetwork.ErrNoSuchNetwork(idName) + } + + if len(list) > 1 { + return nil, libnetwork.ErrInvalidID(idName) + } + + return list[0], nil + case NetworkByName: + if idName == "" { + idName = c.Config().Daemon.DefaultNetwork + } + return c.NetworkByName(idName) + } + return nil, errors.New("unexpected selector for GetNetwork") +} + +// GetNetworksByID returns a list of networks whose ID partially matches zero or more networks +func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { + c := daemon.netController + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + if strings.HasPrefix(nw.ID(), partialID) { + list = append(list, nw) + } + return false + } + c.WalkNetworks(l) + + return list +} + +// CreateNetwork creates a network with the given name, driver and other optional parameters +func (daemon *Daemon) CreateNetwork(name, driver string, ipam network.IPAM, options map[string]string) (libnetwork.Network, error) { + c := daemon.netController + if driver == "" { + driver = c.Config().Daemon.DefaultDriver + } + + nwOptions := []libnetwork.NetworkOption{} + + v4Conf, v6Conf, err := getIpamConfig(ipam.Config) + if err != nil { + return nil, err + } + + nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf)) + nwOptions = append(nwOptions, libnetwork.NetworkOptionDriverOpts(options)) + return c.NewNetwork(driver, name, nwOptions...) +} + +func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { + ipamV4Cfg := []*libnetwork.IpamConf{} + ipamV6Cfg := []*libnetwork.IpamConf{} + for _, d := range data { + iCfg := libnetwork.IpamConf{} + iCfg.PreferredPool = d.Subnet + iCfg.SubPool = d.IPRange + iCfg.Gateway = d.Gateway + iCfg.AuxAddresses = d.AuxAddress + ip, _, err := net.ParseCIDR(d.Subnet) + if err != nil { + return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) + } + if ip.To4() != nil { + ipamV4Cfg = append(ipamV4Cfg, &iCfg) + } else { + ipamV6Cfg = append(ipamV6Cfg, &iCfg) + } + } + return ipamV4Cfg, ipamV6Cfg, nil +} + +// ConnectContainerToNetwork connects the given container to the given +// network. If either cannot be found, an err is returned. If the +// network cannot be set up, an err is returned. 
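The getIpamConfig helper above classifies each subnet as IPv4 or IPv6 by parsing the CIDR and checking To4(). That classification in isolation, runnable as-is:

package main

import (
	"fmt"
	"net"
)

// splitByFamily mirrors the classification inside getIpamConfig: parse
// each CIDR and bucket it by address family via To4().
func splitByFamily(subnets []string) ([]string, []string, error) {
	var v4, v6 []string
	for _, s := range subnets {
		ip, _, err := net.ParseCIDR(s)
		if err != nil {
			return nil, nil, fmt.Errorf("invalid subnet %s: %v", s, err)
		}
		if ip.To4() != nil {
			v4 = append(v4, s)
		} else {
			v6 = append(v6, s)
		}
	}
	return v4, v6, nil
}

func main() {
	v4, v6, err := splitByFamily([]string{"172.18.0.0/16", "2001:db8::/64"})
	fmt.Println(v4, v6, err) // [172.18.0.0/16] [2001:db8::/64] <nil>
}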
+func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string) error { + container, err := daemon.Get(containerName) + if err != nil { + return err + } + return daemon.ConnectToNetwork(container, networkName) +} + +// DisconnectContainerFromNetwork disconnects the given container from +// the given network. If either cannot be found, an err is returned. +func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, network libnetwork.Network) error { + container, err := daemon.Get(containerName) + if err != nil { + return err + } + return container.DisconnectFromNetwork(network) +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go index a2e61eb9..ddc87a7c 100644 --- a/vendor/github.com/docker/docker/daemon/network/settings.go +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -8,24 +8,44 @@ type Address struct { PrefixLen int } +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + // Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., type Settings struct { Bridge string - EndpointID string - Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int + SandboxID string HairpinMode bool - IPAddress string - IPPrefixLen int - IPv6Gateway string LinkLocalIPv6Address string LinkLocalIPv6PrefixLen int - MacAddress string - NetworkID string - PortMapping map[string]map[string]string // Deprecated + Networks map[string]*EndpointSettings Ports nat.PortMap SandboxKey string SecondaryIPAddresses []Address SecondaryIPv6Addresses []Address + IsAnonymousEndpoint bool +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string } diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go index 74513864..be7f6339 100644 --- a/vendor/github.com/docker/docker/daemon/pause.go +++ b/vendor/github.com/docker/docker/daemon/pause.go @@ -1,6 +1,8 @@ package daemon -import "fmt" +import ( + derr "github.com/docker/docker/errors" +) // ContainerPause pauses a container func (daemon *Daemon) ContainerPause(name string) error { @@ -9,9 +11,33 @@ func (daemon *Daemon) ContainerPause(name string) error { return err } - if err := container.Pause(); err != nil { - return fmt.Errorf("Cannot pause container %s: %s", name, err) + if err := daemon.containerPause(container); err != nil { + return derr.ErrorCodePauseError.WithArgs(name, err) } return nil } + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. 
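containerPause, whose body follows, is a compact lock-check-act state machine: take the container lock, reject invalid transitions, perform the driver call, then flip the flag and emit an event. The same shape in a toy, self-contained form:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// box models the locked pause state machine of containerPause.
type box struct {
	mu      sync.Mutex
	running bool
	paused  bool
}

func (b *box) pause() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	if !b.running {
		return errors.New("container is not running")
	}
	if b.paused {
		return errors.New("container is already paused")
	}
	// The vendored code calls execDriver.Pause(...) here before the flip.
	b.paused = true
	return nil
}

func main() {
	b := &box{running: true}
	fmt.Println(b.pause()) // <nil>
	fmt.Println(b.pause()) // container is already paused
}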
+func (daemon *Daemon) containerPause(container *Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return derr.ErrorCodeNotRunning.WithArgs(container.ID) + } + + // We cannot Pause the container which is already paused + if container.Paused { + return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID) + } + + if err := daemon.execDriver.Pause(container.command); err != nil { + return err + } + container.Paused = true + daemon.LogContainerEvent(container, "pause") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go index 46642979..36421dcd 100644 --- a/vendor/github.com/docker/docker/daemon/rename.go +++ b/vendor/github.com/docker/docker/daemon/rename.go @@ -1,15 +1,29 @@ package daemon import ( - "fmt" + "strings" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/errors" + "github.com/docker/libnetwork" ) +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. func (daemon *Daemon) ContainerRename(oldName, newName string) error { + var ( + err error + sid string + sb libnetwork.Sandbox + container *Container + ) + if oldName == "" || newName == "" { - return fmt.Errorf("usage: docker rename OLD_NAME NEW_NAME") + return derr.ErrorCodeEmptyRename } - container, err := daemon.Get(oldName) + container, err = daemon.Get(oldName) if err != nil { return err } @@ -19,27 +33,52 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { container.Lock() defer container.Unlock() if newName, err = daemon.reserveName(container.ID, newName); err != nil { - return fmt.Errorf("Error when allocating new name: %s", err) + return derr.ErrorCodeRenameTaken.WithArgs(err) } container.Name = newName - undo := func() { - container.Name = oldName - daemon.reserveName(container.ID, oldName) - daemon.containerGraph.Delete(newName) + defer func() { + if err != nil { + container.Name = oldName + daemon.reserveName(container.ID, oldName) + daemon.containerGraphDB.Delete(newName) + } + }() + + if err = daemon.containerGraphDB.Delete(oldName); err != nil { + return derr.ErrorCodeRenameDelete.WithArgs(oldName, err) } - if err := daemon.containerGraph.Delete(oldName); err != nil { - undo() - return fmt.Errorf("Failed to delete container %q: %v", oldName, err) - } - - if err := container.toDisk(); err != nil { - undo() + if err = container.toDisk(); err != nil { return err } - container.LogEvent("rename") + if !container.Running { + daemon.LogContainerEvent(container, "rename") + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + if e := container.toDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + sid = container.NetworkSettings.SandboxID + sb, err = daemon.netController.SandboxByID(sid) + if err != nil { + return err + } + + err = sb.Rename(strings.TrimPrefix(container.Name, "/")) + if err != nil { + return err + } + + daemon.LogContainerEvent(container, "rename") return nil } diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go index f2253946..ea9928c4 100644 --- a/vendor/github.com/docker/docker/daemon/resize.go +++ b/vendor/github.com/docker/docker/daemon/resize.go @@ -1,19 +1,33 @@ package daemon +import derr "github.com/docker/docker/errors" + +// 
ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. func (daemon *Daemon) ContainerResize(name string, height, width int) error { container, err := daemon.Get(name) if err != nil { return err } - return container.Resize(height, width) + if !container.IsRunning() { + return derr.ErrorCodeNotRunning.WithArgs(container.ID) + } + + if err = container.Resize(height, width); err == nil { + daemon.LogContainerEvent(container, "resize") + } + return err } +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { - execConfig, err := daemon.getExecConfig(name) + ExecConfig, err := daemon.getExecConfig(name) if err != nil { return err } - return execConfig.Resize(height, width) + return ExecConfig.resize(height, width) } diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go index cf7d9e18..fcfe6128 100644 --- a/vendor/github.com/docker/docker/daemon/restart.go +++ b/vendor/github.com/docker/docker/daemon/restart.go @@ -1,14 +1,46 @@ package daemon -import "fmt" +import ( + derr "github.com/docker/docker/errors" +) +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. func (daemon *Daemon) ContainerRestart(name string, seconds int) error { container, err := daemon.Get(name) if err != nil { return err } - if err := container.Restart(seconds); err != nil { - return fmt.Errorf("Cannot restart container %s: %s\n", name, err) + if err := daemon.containerRestart(container, seconds); err != nil { + return derr.ErrorCodeCantRestart.WithArgs(name, err) } return nil } + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. +func (daemon *Daemon) containerRestart(container *Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if err := daemon.containerStop(container, seconds); err != nil { + return err + } + + if err := daemon.containerStart(container); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go index 9df56c5a..de4516c7 100644 --- a/vendor/github.com/docker/docker/daemon/start.go +++ b/vendor/github.com/docker/docker/daemon/start.go @@ -1,31 +1,30 @@ package daemon import ( - "fmt" "runtime" + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/promise" "github.com/docker/docker/runconfig" ) +// ContainerStart starts a container. 
func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error { container, err := daemon.Get(name) if err != nil { return err } - if container.IsPaused() { - return fmt.Errorf("Cannot start a paused container, try unpause instead.") + if container.isPaused() { + return derr.ErrorCodeStartPaused } if container.IsRunning() { - return fmt.Errorf("Container already started") + return derr.ErrorCodeAlreadyStarted } - if _, err = daemon.verifyContainerSettings(hostConfig, nil); err != nil { - return err - } - - // Windows does not have the backwards compatibilty issue here. + // Windows does not have the backwards compatibility issue here. if runtime.GOOS != "windows" { // This is kept for backward compatibility - hostconfig should be passed when // creating a container, not during start. @@ -36,13 +35,126 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf } } else { if hostConfig != nil { - return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + return derr.ErrorCodeHostConfigStart } } - if err := container.Start(); err != nil { - return fmt.Errorf("Cannot start container %s: %s", name, err) + // check if hostConfig is in line with the current system settings. + // It may happen cgroups are umounted or the like. + if _, err = daemon.verifyContainerSettings(container.hostConfig, nil); err != nil { + return err + } + + if err := daemon.containerStart(container); err != nil { + return err } return nil } + +// Start starts a container +func (daemon *Daemon) Start(container *Container) error { + return daemon.containerStart(container) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running. +func (daemon *Daemon) containerStart(container *Container) (err error) { + container.Lock() + defer container.Unlock() + + if container.Running { + return nil + } + + if container.removalInProgress || container.Dead { + return derr.ErrorCodeContainerBeingRemoved + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.setError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode == 0 { + container.ExitCode = 128 + } + container.toDisk() + daemon.Cleanup(container) + daemon.LogContainerEvent(container, "die") + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. 
+ container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig) + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + linkedEnv, err := daemon.setupLinkedContainers(container) + if err != nil { + return err + } + if err := container.setupWorkingDirectory(); err != nil { + return err + } + env := container.createDaemonEnvironment(linkedEnv) + if err := daemon.populateCommand(container, env); err != nil { + return err + } + + if !container.hostConfig.IpcMode.IsContainer() && !container.hostConfig.IpcMode.IsHost() { + if err := daemon.setupIpcDirs(container); err != nil { + return err + } + } + + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + mounts = append(mounts, container.ipcMounts()...) + + container.command.Mounts = mounts + return daemon.waitForStart(container) +} + +func (daemon *Daemon) waitForStart(container *Container) error { + container.monitor = daemon.newContainerMonitor(container, container.hostConfig.RestartPolicy) + + // block until we either receive an error from the initial start of the container's + // process or until the process is running in the container + select { + case <-container.monitor.startSignal: + case err := <-promise.Go(container.monitor.Start): + return err + } + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. +func (daemon *Daemon) Cleanup(container *Container) { + daemon.releaseNetwork(container) + + container.unmountIpcMounts(detachMounted) + + daemon.conditionalUnmountOnCleanup(container) + + for _, eConfig := range container.execCommands.s { + daemon.unregisterExecCommand(eConfig) + } + + if err := container.unmountVolumes(false); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/state.go b/vendor/github.com/docker/docker/daemon/state.go index 861671d7..8ff5effc 100644 --- a/vendor/github.com/docker/docker/daemon/state.go +++ b/vendor/github.com/docker/docker/daemon/state.go @@ -6,11 +6,17 @@ import ( "time" "github.com/docker/docker/daemon/execdriver" + derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/units" ) +// State holds the current container state, and has methods to get and +// set the state. Container has an embed, which allows all of the +// functions defined against State to run against Container. type State struct { sync.Mutex + // FIXME: Why do we have both paused and running if a + // container cannot be paused and running at the same time? Running bool Paused bool Restarting bool @@ -25,6 +31,7 @@ type State struct { waitChan chan struct{} } +// NewState creates a default state object with a fresh channel for state changes. func NewState() *State { return &State{ waitChan: make(chan struct{}), @@ -105,16 +112,17 @@ func wait(waitChan <-chan struct{}, timeout time.Duration) error { } select { case <-time.After(timeout): - return fmt.Errorf("Timed out: %v", timeout) + return derr.ErrorCodeTimedOut.WithArgs(timeout) case <-waitChan: return nil } } -// WaitRunning waits until state is running. If state already running it returns -// immediately. If you want wait forever you must supply negative timeout. -// Returns pid, that was passed to SetRunning -func (s *State) WaitRunning(timeout time.Duration) (int, error) { +// waitRunning waits until state is running. 
If state is already
+// running it returns immediately. If you want to wait forever you must
+// supply a negative timeout. Returns the pid that was passed to
+// setRunning.
+func (s *State) waitRunning(timeout time.Duration) (int, error) {
 	s.Lock()
 	if s.Running {
 		pid := s.Pid
@@ -126,12 +134,12 @@ func (s *State) WaitRunning(timeout time.Duration) (int, error) {
 	if err := wait(waitChan, timeout); err != nil {
 		return -1, err
 	}
-	return s.GetPid(), nil
+	return s.GetPID(), nil
 }
 
 // WaitStop waits until state is stopped. If state already stopped it returns
 // immediately. If you want wait forever you must supply negative timeout.
-// Returns exit code, that was passed to SetStopped
+// Returns exit code, that was passed to setStoppedLocking
 func (s *State) WaitStop(timeout time.Duration) (int, error) {
 	s.Lock()
 	if !s.Running {
@@ -144,9 +152,10 @@ func (s *State) WaitStop(timeout time.Duration) (int, error) {
 	if err := wait(waitChan, timeout); err != nil {
 		return -1, err
 	}
-	return s.GetExitCode(), nil
+	return s.getExitCode(), nil
 }
 
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
 func (s *State) IsRunning() bool {
 	s.Lock()
 	res := s.Running
@@ -154,26 +163,21 @@ func (s *State) IsRunning() bool {
 	return res
 }
 
-func (s *State) GetPid() int {
+// GetPID returns the process id of a container.
+func (s *State) GetPID() int {
 	s.Lock()
 	res := s.Pid
 	s.Unlock()
 	return res
 }
 
-func (s *State) GetExitCode() int {
+func (s *State) getExitCode() int {
 	s.Lock()
 	res := s.ExitCode
 	s.Unlock()
 	return res
 }
 
-func (s *State) SetRunning(pid int) {
-	s.Lock()
-	s.setRunning(pid)
-	s.Unlock()
-}
-
 func (s *State) setRunning(pid int) {
 	s.Error = ""
 	s.Running = true
@@ -186,7 +190,7 @@ func (s *State) setRunning(pid int) {
 	s.waitChan = make(chan struct{})
 }
 
-func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) {
+func (s *State) setStoppedLocking(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
 	s.setStopped(exitStatus)
 	s.Unlock()
@@ -197,27 +201,29 @@ func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
 	s.Restarting = false
 	s.Pid = 0
 	s.FinishedAt = time.Now().UTC()
-	s.ExitCode = exitStatus.ExitCode
-	s.OOMKilled = exitStatus.OOMKilled
+	s.setFromExitStatus(exitStatus)
 	close(s.waitChan) // fire waiters for stop
 	s.waitChan = make(chan struct{})
 }
 
-// SetRestarting is when docker handles the auto restart of containers when they are
+// setRestarting is when docker handles the auto restart of containers when they are
 // in the middle of a stop and being restarted again
-func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
+func (s *State) setRestartingLocking(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
+	s.setRestarting(exitStatus)
+	s.Unlock()
+}
+
+func (s *State) setRestarting(exitStatus *execdriver.ExitStatus) {
 	// we should consider the container running when it is restarting because of
 	// all the checks in docker around rm/stop/etc
 	s.Running = true
 	s.Restarting = true
 	s.Pid = 0
 	s.FinishedAt = time.Now().UTC()
-	s.ExitCode = exitStatus.ExitCode
-	s.OOMKilled = exitStatus.OOMKilled
+	s.setFromExitStatus(exitStatus)
 	close(s.waitChan) // fire waiters for stop
 	s.waitChan = make(chan struct{})
-	s.Unlock()
 }
 
 // setError sets the container's error state.
This is useful when we want to @@ -227,49 +233,30 @@ func (s *State) setError(err error) { s.Error = err.Error() } -func (s *State) IsRestarting() bool { - s.Lock() - res := s.Restarting - s.Unlock() - return res -} - -func (s *State) SetPaused() { - s.Lock() - s.Paused = true - s.Unlock() -} - -func (s *State) SetUnpaused() { - s.Lock() - s.Paused = false - s.Unlock() -} - -func (s *State) IsPaused() bool { +func (s *State) isPaused() bool { s.Lock() res := s.Paused s.Unlock() return res } -func (s *State) SetRemovalInProgress() error { +func (s *State) setRemovalInProgress() error { s.Lock() defer s.Unlock() if s.removalInProgress { - return fmt.Errorf("Status is already RemovalInProgress") + return derr.ErrorCodeAlreadyRemoving } s.removalInProgress = true return nil } -func (s *State) ResetRemovalInProgress() { +func (s *State) resetRemovalInProgress() { s.Lock() s.removalInProgress = false s.Unlock() } -func (s *State) SetDead() { +func (s *State) setDead() { s.Lock() s.Dead = true s.Unlock() diff --git a/vendor/github.com/docker/docker/daemon/state_test.go b/vendor/github.com/docker/docker/daemon/state_test.go index 861076ae..c70dc4e1 100644 --- a/vendor/github.com/docker/docker/daemon/state_test.go +++ b/vendor/github.com/docker/docker/daemon/state_test.go @@ -14,11 +14,14 @@ func TestStateRunStop(t *testing.T) { started := make(chan struct{}) var pid int64 go func() { - runPid, _ := s.WaitRunning(-1 * time.Second) + runPid, _ := s.waitRunning(-1 * time.Second) atomic.StoreInt64(&pid, int64(runPid)) close(started) }() - s.SetRunning(i + 100) + s.Lock() + s.setRunning(i + 100) + s.Unlock() + if !s.IsRunning() { t.Fatal("State not running") } @@ -38,8 +41,8 @@ func TestStateRunStop(t *testing.T) { if runPid != i+100 { t.Fatalf("Pid %v, expected %v", runPid, i+100) } - if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 { - t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) + if pid, err := s.waitRunning(-1 * time.Second); err != nil || pid != i+100 { + t.Fatalf("waitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) } stopped := make(chan struct{}) @@ -49,7 +52,7 @@ func TestStateRunStop(t *testing.T) { atomic.StoreInt64(&exit, int64(exitCode)) close(stopped) }() - s.SetStopped(&execdriver.ExitStatus{ExitCode: i}) + s.setStoppedLocking(&execdriver.ExitStatus{ExitCode: i}) if s.IsRunning() { t.Fatal("State is running") } @@ -79,7 +82,7 @@ func TestStateTimeoutWait(t *testing.T) { s := NewState() started := make(chan struct{}) go func() { - s.WaitRunning(100 * time.Millisecond) + s.waitRunning(100 * time.Millisecond) close(started) }() select { @@ -88,10 +91,14 @@ func TestStateTimeoutWait(t *testing.T) { case <-started: t.Log("Start callback fired") } - s.SetRunning(42) + + s.Lock() + s.setRunning(49) + s.Unlock() + stopped := make(chan struct{}) go func() { - s.WaitRunning(100 * time.Millisecond) + s.waitRunning(100 * time.Millisecond) close(stopped) }() select { diff --git a/vendor/github.com/docker/docker/daemon/state_unix.go b/vendor/github.com/docker/docker/daemon/state_unix.go new file mode 100644 index 00000000..e5f4db33 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/state_unix.go @@ -0,0 +1,12 @@ +// +build linux freebsd + +package daemon + +import "github.com/docker/docker/daemon/execdriver" + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. 
+func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) { + s.ExitCode = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled +} diff --git a/vendor/github.com/docker/docker/daemon/state_windows.go b/vendor/github.com/docker/docker/daemon/state_windows.go new file mode 100644 index 00000000..223d4bc5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/state_windows.go @@ -0,0 +1,9 @@ +package daemon + +import "github.com/docker/docker/daemon/execdriver" + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *execdriver.ExitStatus) { + s.ExitCode = exitStatus.ExitCode +} diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go index c5079624..ed829c85 100644 --- a/vendor/github.com/docker/docker/daemon/stats.go +++ b/vendor/github.com/docker/docker/daemon/stats.go @@ -5,46 +5,56 @@ import ( "io" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/libnetwork/sandbox" - "github.com/opencontainers/runc/libcontainer" + "github.com/docker/docker/pkg/version" ) +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a daemon.ContainerStats() call. type ContainerStatsConfig struct { Stream bool OutStream io.Writer Stop <-chan bool + Version version.Version } -func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) error { - updates, err := daemon.SubscribeToContainerStats(name) +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(prefixOrName string, config *ContainerStatsConfig) error { + container, err := daemon.Get(prefixOrName) if err != nil { return err } + // If the container is not running and requires no stream, return an empty stats. + if !container.IsRunning() && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) + } + if config.Stream { + // Write an empty chunk of data. + // This is to ensure that the HTTP status code is sent immediately, + // even if the container has not yet produced any data. 
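The empty write that immediately follows exists to push the HTTP status line and headers to the client before the first stats sample is produced. The same flush-early pattern in a plain net/http handler goes through http.Flusher; this sketch is a generic illustration, not the daemon's actual server plumbing (the handler, route, and payload are invented):

    package main

    import (
    	"encoding/json"
    	"net/http"
    	"time"
    )

    func stats(w http.ResponseWriter, r *http.Request) {
    	w.WriteHeader(http.StatusOK)
    	if f, ok := w.(http.Flusher); ok {
    		f.Flush() // the client sees the 200 immediately, before any body
    	}
    	enc := json.NewEncoder(w)
    	for i := 0; i < 3; i++ {
    		enc.Encode(map[string]int{"sample": i}) // one JSON frame per tick
    		if f, ok := w.(http.Flusher); ok {
    			f.Flush()
    		}
    		time.Sleep(time.Second)
    	}
    }

    func main() {
    	http.HandleFunc("/stats", stats)
    	http.ListenAndServe("127.0.0.1:8080", nil)
    }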
config.OutStream.Write(nil) } - var preCpuStats types.CpuStats - getStat := func(v interface{}) *types.Stats { + var preCPUStats types.CPUStats + getStatJSON := func(v interface{}) *types.StatsJSON { update := v.(*execdriver.ResourceStats) - // Retrieve the nw statistics from libnetwork and inject them in the Stats - if nwStats, err := daemon.getNetworkStats(name); err == nil { - update.Stats.Interfaces = nwStats - } ss := convertStatsToAPITypes(update.Stats) - ss.PreCpuStats = preCpuStats + ss.PreCPUStats = preCPUStats ss.MemoryStats.Limit = uint64(update.MemoryLimit) ss.Read = update.Read - ss.CpuStats.SystemUsage = update.SystemUsage - preCpuStats = ss.CpuStats + ss.CPUStats.SystemUsage = update.SystemUsage + preCPUStats = ss.CPUStats return ss } enc := json.NewEncoder(config.OutStream) - defer daemon.UnsubscribeToContainerStats(name, updates) + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) noStreamFirstFrame := true for { @@ -54,14 +64,53 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) return nil } - s := getStat(v) + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if config.Version.LessThan("1.21") { + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + if !config.Stream && noStreamFirstFrame { // prime the cpu stats so they aren't 0 in the final output noStreamFirstFrame = false continue } - if err := enc.Encode(s); err != nil { + if err := enc.Encode(statsJSON); err != nil { return err } @@ -73,46 +122,3 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) } } } - -func (daemon *Daemon) getNetworkStats(name string) ([]*libcontainer.NetworkInterface, error) { - var list []*libcontainer.NetworkInterface - - c, err := daemon.Get(name) - if err != nil { - return list, err - } - - nw, err := daemon.netController.NetworkByID(c.NetworkSettings.NetworkID) - if err != nil { - return list, err - } - ep, err := nw.EndpointByID(c.NetworkSettings.EndpointID) - if err != nil { - return list, err - } - - stats, err := ep.Statistics() - if err != nil { - return list, err - } - - // Convert libnetwork nw stats into libcontainer nw stats - for ifName, ifStats := range stats { - list = append(list, convertLnNetworkStats(ifName, ifStats)) - } - - return list, nil -} - -func convertLnNetworkStats(name string, stats *sandbox.InterfaceStatistics) *libcontainer.NetworkInterface { - n := &libcontainer.NetworkInterface{Name: name} - n.RxBytes = stats.RxBytes - n.RxPackets = stats.RxPackets - n.RxErrors = stats.RxErrors - n.RxDropped = stats.RxDropped - n.TxBytes = stats.TxBytes - n.TxPackets = stats.TxPackets - n.TxErrors = stats.TxErrors - n.TxDropped = stats.TxDropped - return n -} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go 
b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go index 73b6a872..6fb32b9a 100644 --- a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go +++ b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go @@ -4,7 +4,6 @@ package daemon import ( "bufio" - "fmt" "os" "strconv" "strings" @@ -13,20 +12,27 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" + derr "github.com/docker/docker/errors" "github.com/docker/docker/pkg/pubsub" "github.com/opencontainers/runc/libcontainer/system" ) +type statsSupervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *Container) (*execdriver.ResourceStats, error) +} + // newStatsCollector returns a new statsCollector that collections // network and cgroup stats for a registered container at the specified // interval. The collector allows non-running containers to be added // and will start processing stats when they are started. -func newStatsCollector(interval time.Duration) *statsCollector { +func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { s := &statsCollector{ - interval: interval, - publishers: make(map[*Container]*pubsub.Publisher), - clockTicks: uint64(system.GetClockTicks()), - bufReader: bufio.NewReaderSize(nil, 128), + interval: interval, + supervisor: daemon, + publishers: make(map[*Container]*pubsub.Publisher), + clockTicksPerSecond: uint64(system.GetClockTicks()), + bufReader: bufio.NewReaderSize(nil, 128), } go s.run() return s @@ -34,11 +40,12 @@ func newStatsCollector(interval time.Duration) *statsCollector { // statsCollector manages and provides container resource stats type statsCollector struct { - m sync.Mutex - interval time.Duration - clockTicks uint64 - publishers map[*Container]*pubsub.Publisher - bufReader *bufio.Reader + m sync.Mutex + supervisor statsSupervisor + interval time.Duration + clockTicksPerSecond uint64 + publishers map[*Container]*pubsub.Publisher + bufReader *bufio.Reader } // collect registers the container with the collector and adds it to @@ -89,12 +96,6 @@ func (s *statsCollector) run() { var pairs []publishersPair for range time.Tick(s.interval) { - systemUsage, err := s.getSystemCpuUsage() - if err != nil { - logrus.Errorf("collecting system cpu usage: %v", err) - continue - } - // it does not make sense in the first iteration, // but saves allocations in further iterations pairs = pairs[:0] @@ -105,9 +106,18 @@ func (s *statsCollector) run() { pairs = append(pairs, publishersPair{container, publisher}) } s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } for _, pair := range pairs { - stats, err := pair.container.Stats() + stats, err := s.supervisor.GetContainerStats(pair.container) if err != nil { if err != execdriver.ErrNotRunning { logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) @@ -115,16 +125,23 @@ func (s *statsCollector) run() { continue } stats.SystemUsage = systemUsage + pair.publisher.Publish(stats) } } } -const nanoSeconds = 1e9 +const nanoSecondsPerSecond = 1e9 -// getSystemCpuUSage returns the host system's cpu usage in nanoseconds -// for the system to match the cgroup readings are returned in the same format. -func (s *statsCollector) getSystemCpuUsage() (uint64, error) { +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. 
An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. +func (s *statsCollector) getSystemCPUUsage() (uint64, error) { var line string f, err := os.Open("/proc/stat") if err != nil { @@ -145,18 +162,19 @@ func (s *statsCollector) getSystemCpuUsage() (uint64, error) { switch parts[0] { case "cpu": if len(parts) < 8 { - return 0, fmt.Errorf("invalid number of cpu fields") + return 0, derr.ErrorCodeBadCPUFields } - var sum uint64 + var totalClockTicks uint64 for _, i := range parts[1:8] { v, err := strconv.ParseUint(i, 10, 64) if err != nil { - return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + return 0, derr.ErrorCodeBadCPUInt.WithArgs(i, err) } - sum += v + totalClockTicks += v } - return (sum * nanoSeconds) / s.clockTicks, nil + return (totalClockTicks * nanoSecondsPerSecond) / + s.clockTicksPerSecond, nil } } - return 0, fmt.Errorf("invalid stat format") + return 0, derr.ErrorCodeBadStatFormat } diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_windows.go b/vendor/github.com/docker/docker/daemon/stats_collector_windows.go index f16d7ee3..e63f37b0 100644 --- a/vendor/github.com/docker/docker/daemon/stats_collector_windows.go +++ b/vendor/github.com/docker/docker/daemon/stats_collector_windows.go @@ -6,7 +6,7 @@ import "time" // for a registered container at the specified interval. The collector allows // non-running containers to be added and will start processing stats when // they are started. -func newStatsCollector(interval time.Duration) *statsCollector { +func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { return &statsCollector{} } diff --git a/vendor/github.com/docker/docker/daemon/stats_freebsd.go b/vendor/github.com/docker/docker/daemon/stats_freebsd.go index 4ec7c657..1898ca9d 100644 --- a/vendor/github.com/docker/docker/daemon/stats_freebsd.go +++ b/vendor/github.com/docker/docker/daemon/stats_freebsd.go @@ -6,9 +6,9 @@ import ( ) // convertStatsToAPITypes converts the libcontainer.Stats to the api specific -// structs. This is done to preserve API compatibility and versioning. -func convertStatsToAPITypes(ls *libcontainer.Stats) *types.Stats { +// structs. This is done to preserve API compatibility and versioning. +func convertStatsToAPITypes(ls *libcontainer.Stats) *types.StatsJSON { // TODO FreeBSD. Refactor accordingly to fill in stats. - s := &types.Stats{} + s := &types.StatsJSON{} return s } diff --git a/vendor/github.com/docker/docker/daemon/stats_linux.go b/vendor/github.com/docker/docker/daemon/stats_linux.go index 8c1b0873..466f2df5 100644 --- a/vendor/github.com/docker/docker/daemon/stats_linux.go +++ b/vendor/github.com/docker/docker/daemon/stats_linux.go @@ -7,20 +7,24 @@ import ( ) // convertStatsToAPITypes converts the libcontainer.Stats to the api specific -// structs. This is done to preserve API compatibility and versioning. -func convertStatsToAPITypes(ls *libcontainer.Stats) *types.Stats { - s := &types.Stats{} +// structs. This is done to preserve API compatibility and versioning. 
+func convertStatsToAPITypes(ls *libcontainer.Stats) *types.StatsJSON { + s := &types.StatsJSON{} if ls.Interfaces != nil { - s.Network = types.Network{} + s.Networks = make(map[string]types.NetworkStats) for _, iface := range ls.Interfaces { - s.Network.RxBytes += iface.RxBytes - s.Network.RxPackets += iface.RxPackets - s.Network.RxErrors += iface.RxErrors - s.Network.RxDropped += iface.RxDropped - s.Network.TxBytes += iface.TxBytes - s.Network.TxPackets += iface.TxPackets - s.Network.TxErrors += iface.TxErrors - s.Network.TxDropped += iface.TxDropped + // For API Version >= 1.21, the original data of network will + // be returned. + s.Networks[iface.Name] = types.NetworkStats{ + RxBytes: iface.RxBytes, + RxPackets: iface.RxPackets, + RxErrors: iface.RxErrors, + RxDropped: iface.RxDropped, + TxBytes: iface.TxBytes, + TxPackets: iface.TxPackets, + TxErrors: iface.TxErrors, + TxDropped: iface.TxDropped, + } } } @@ -37,8 +41,8 @@ func convertStatsToAPITypes(ls *libcontainer.Stats) *types.Stats { SectorsRecursive: copyBlkioEntry(cs.BlkioStats.SectorsRecursive), } cpu := cs.CpuStats - s.CpuStats = types.CpuStats{ - CpuUsage: types.CpuUsage{ + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ TotalUsage: cpu.CpuUsage.TotalUsage, PercpuUsage: cpu.CpuUsage.PercpuUsage, UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, diff --git a/vendor/github.com/docker/docker/daemon/stats_windows.go b/vendor/github.com/docker/docker/daemon/stats_windows.go index c79eb640..fc8991ba 100644 --- a/vendor/github.com/docker/docker/daemon/stats_windows.go +++ b/vendor/github.com/docker/docker/daemon/stats_windows.go @@ -6,9 +6,9 @@ import ( ) // convertStatsToAPITypes converts the libcontainer.Stats to the api specific -// structs. This is done to preserve API compatibility and versioning. -func convertStatsToAPITypes(ls *libcontainer.Stats) *types.Stats { +// structs. This is done to preserve API compatibility and versioning. +func convertStatsToAPITypes(ls *libcontainer.Stats) *types.StatsJSON { // TODO Windows. Refactor accordingly to fill in stats. - s := &types.Stats{} + s := &types.StatsJSON{} return s } diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go index 23253e39..c97781b8 100644 --- a/vendor/github.com/docker/docker/daemon/stop.go +++ b/vendor/github.com/docker/docker/daemon/stop.go @@ -1,17 +1,60 @@ package daemon -import "fmt" +import ( + "time" + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/errors" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. 
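One note on the stats conversion above, before the stop.go hunk: clients speaking API 1.21 or later receive the per-interface Networks map built here, while the v1p20 branch earlier in this patch folds those counters into a single total for older clients. A minimal sketch of that fold, with invented stand-in types rather than the real API structs:

    package main

    import "fmt"

    // ifaceStats is an illustrative stand-in for the per-interface
    // counters kept in the Networks map above.
    type ifaceStats struct {
    	RxBytes, RxPackets, TxBytes, TxPackets uint64
    }

    // aggregate folds per-interface counters into the single Network
    // total that pre-1.21 API clients expect.
    func aggregate(networks map[string]ifaceStats) ifaceStats {
    	var total ifaceStats
    	for _, s := range networks {
    		total.RxBytes += s.RxBytes
    		total.RxPackets += s.RxPackets
    		total.TxBytes += s.TxBytes
    		total.TxPackets += s.TxPackets
    	}
    	return total
    }

    func main() {
    	n := map[string]ifaceStats{
    		"eth0": {RxBytes: 1024, RxPackets: 8, TxBytes: 512, TxPackets: 4},
    		"eth1": {RxBytes: 2048, RxPackets: 16, TxBytes: 256, TxPackets: 2},
    	}
    	fmt.Printf("%+v\n", aggregate(n)) // {RxBytes:3072 RxPackets:24 TxBytes:768 TxPackets:6}
    }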
func (daemon *Daemon) ContainerStop(name string, seconds int) error { container, err := daemon.Get(name) if err != nil { return err } if !container.IsRunning() { - return fmt.Errorf("Container already stopped") + return derr.ErrorCodeStopped } - if err := container.Stop(seconds); err != nil { - return fmt.Errorf("Cannot stop container %s: %s\n", name, err) + if err := daemon.containerStop(container, seconds); err != nil { + return derr.ErrorCodeCantStop.WithArgs(name, err) } return nil } + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + // 1. Send a SIGTERM + if err := daemon.killPossiblyDeadProcess(container, container.stopSignal()); err != nil { + logrus.Infof("Failed to send SIGTERM to the process, force killing") + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) + // 3. If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + container.WaitStop(-1 * time.Second) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/top.go b/vendor/github.com/docker/docker/daemon/top_unix.go similarity index 66% rename from vendor/github.com/docker/docker/daemon/top.go rename to vendor/github.com/docker/docker/daemon/top_unix.go index 30a7893a..36ace121 100644 --- a/vendor/github.com/docker/docker/daemon/top.go +++ b/vendor/github.com/docker/docker/daemon/top_unix.go @@ -1,14 +1,21 @@ +//+build !windows + package daemon import ( - "fmt" "os/exec" "strconv" "strings" "github.com/docker/docker/api/types" + derr "github.com/docker/docker/errors" ) +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. 
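As a standalone miniature of the parsing strategy just described, assuming a Unix ps: locate the PID column in the header row, then parse that field out of every subsequent line. This is a sketch independent of the daemon's types, not the vendored implementation itself:

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strconv"
    	"strings"
    )

    func main() {
    	out, err := exec.Command("ps", "-ef").Output()
    	if err != nil {
    		panic(err)
    	}
    	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
    	// locate the PID column in the header row
    	pidIndex := -1
    	for i, name := range strings.Fields(lines[0]) {
    		if name == "PID" {
    			pidIndex = i
    			break
    		}
    	}
    	if pidIndex == -1 {
    		panic("no PID column in ps output")
    	}
    	// parse the PID field out of every following row
    	for _, line := range lines[1:] {
    		fields := strings.Fields(line)
    		if len(fields) <= pidIndex {
    			continue
    		}
    		pid, err := strconv.Atoi(fields[pidIndex])
    		if err != nil {
    			continue // sketch: skip rows that do not parse
    		}
    		fmt.Println(pid)
    	}
    }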
func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { if psArgs == "" { psArgs = "-ef" @@ -20,7 +27,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container } if !container.IsRunning() { - return nil, fmt.Errorf("Container %s is not running", name) + return nil, derr.ErrorCodeNotRunning.WithArgs(name) } pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) @@ -30,7 +37,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() if err != nil { - return nil, fmt.Errorf("Error running ps: %s", err) + return nil, derr.ErrorCodePSError.WithArgs(err) } procList := &types.ContainerProcessList{} @@ -45,9 +52,10 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container } } if pidIndex == -1 { - return nil, fmt.Errorf("Couldn't find PID field in ps output") + return nil, derr.ErrorCodeNoPID } + // loop through the output and extract the PID from each line for _, line := range lines[1:] { if len(line) == 0 { continue @@ -55,7 +63,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container fields := strings.Fields(line) p, err := strconv.Atoi(fields[pidIndex]) if err != nil { - return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + return nil, derr.ErrorCodeBadPID.WithArgs(fields[pidIndex], err) } for _, pid := range pids { @@ -68,6 +76,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container } } } - container.LogEvent("top") + daemon.LogContainerEvent(container, "top") return procList, nil } diff --git a/vendor/github.com/docker/docker/daemon/top_windows.go b/vendor/github.com/docker/docker/daemon/top_windows.go new file mode 100644 index 00000000..f224b2e2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_windows.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + derr "github.com/docker/docker/errors" +) + +// ContainerTop is not supported on Windows and returns an error. +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + return nil, derr.ErrorCodeNoTop +} diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go index 3550b7a9..3397f44b 100644 --- a/vendor/github.com/docker/docker/daemon/unpause.go +++ b/vendor/github.com/docker/docker/daemon/unpause.go @@ -1,6 +1,8 @@ package daemon -import "fmt" +import ( + derr "github.com/docker/docker/errors" +) // ContainerUnpause unpauses a container func (daemon *Daemon) ContainerUnpause(name string) error { @@ -9,9 +11,33 @@ func (daemon *Daemon) ContainerUnpause(name string) error { return err } - if err := container.Unpause(); err != nil { - return fmt.Errorf("Cannot unpause container %s: %s", name, err) + if err := daemon.containerUnpause(container); err != nil { + return derr.ErrorCodeCantUnpause.WithArgs(name, err) } return nil } + +// containerUnpause resumes the container execution after the container is paused. 
+func (daemon *Daemon) containerUnpause(container *Container) error { + container.Lock() + defer container.Unlock() + + // We cannot unpause the container which is not running + if !container.Running { + return derr.ErrorCodeNotRunning.WithArgs(container.ID) + } + + // We cannot unpause the container which is not paused + if !container.Paused { + return derr.ErrorCodeNotPaused.WithArgs(container.ID) + } + + if err := daemon.execDriver.Unpause(container.command); err != nil { + return err + } + + container.Paused = false + daemon.LogContainerEvent(container, "unpause") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/utils_freebsd.go b/vendor/github.com/docker/docker/daemon/utils_freebsd.go deleted file mode 100644 index 1e1ed1b3..00000000 --- a/vendor/github.com/docker/docker/daemon/utils_freebsd.go +++ /dev/null @@ -1,9 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/runconfig" -) - -func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) { - return nil, nil -} diff --git a/vendor/github.com/docker/docker/daemon/utils_linux.go b/vendor/github.com/docker/docker/daemon/utils_linux.go index 042544e4..83a34471 100644 --- a/vendor/github.com/docker/docker/daemon/utils_linux.go +++ b/vendor/github.com/docker/docker/daemon/utils_linux.go @@ -2,14 +2,7 @@ package daemon -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/docker/runconfig" - "github.com/opencontainers/runc/libcontainer/selinux" -) +import "github.com/opencontainers/runc/libcontainer/selinux" func selinuxSetDisabled() { selinux.SetDisabled() @@ -22,27 +15,3 @@ func selinuxFreeLxcContexts(label string) { func selinuxEnabled() bool { return selinux.SelinuxEnabled() } - -func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) { - if hostConfig == nil { - return nil, nil - } - - out := []string{} - - // merge in the lxc conf options into the generic config map - if lxcConf := hostConfig.LxcConf; lxcConf != nil { - lxSlice := lxcConf.Slice() - for _, pair := range lxSlice { - // because lxc conf gets the driver name lxc.XXXX we need to trim it off - // and let the lxc driver add it back later if needed - if !strings.Contains(pair.Key, ".") { - return nil, errors.New("Illegal Key passed into LXC Configurations") - } - parts := strings.SplitN(pair.Key, ".", 2) - out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value)) - } - } - - return out, nil -} diff --git a/vendor/github.com/docker/docker/daemon/utils_test.go b/vendor/github.com/docker/docker/daemon/utils_test.go deleted file mode 100644 index 99165f78..00000000 --- a/vendor/github.com/docker/docker/daemon/utils_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux - -package daemon - -import ( - "testing" - - "github.com/docker/docker/runconfig" -) - -func TestMergeLxcConfig(t *testing.T) { - kv := []runconfig.KeyValuePair{ - {"lxc.cgroups.cpuset", "1,2"}, - } - hostConfig := &runconfig.HostConfig{ - LxcConf: runconfig.NewLxcConfig(kv), - } - - out, err := mergeLxcConfIntoOptions(hostConfig) - if err != nil { - t.Fatalf("Failed to merge Lxc Config: %s", err) - } - - cpuset := out[0] - if expected := "cgroups.cpuset=1,2"; cpuset != expected { - t.Fatalf("expected %s got %s", expected, cpuset) - } -} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go index 5d43c73e..cca88983 100644 --- a/vendor/github.com/docker/docker/daemon/volumes.go +++ b/vendor/github.com/docker/docker/daemon/volumes.go @@ -2,87 +2,154 @@ package 
daemon import ( "errors" - "fmt" - "io/ioutil" "os" "path/filepath" "strings" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/system" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/execdriver" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/runconfig" "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/label" ) -// ErrVolumeReadonly is used to signal an error when trying to copy data into -// a volume mount that is not writable. -var ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) -// TODO Windows. Further platform refactoring can still be done in volumes*.go +type mounts []execdriver.Mount -type mountPoint struct { - Name string - Destination string - Driver string - RW bool - Volume volume.Volume `json:"-"` - Source string - Mode string `json:"Relabel"` // Originally field was `Relabel`" -} - -func (m *mountPoint) Setup() (string, error) { - if m.Volume != nil { - return m.Volume.Mount() +// volumeToAPIType converts a volume.Volume to the type used by the remote API +func volumeToAPIType(v volume.Volume) *types.Volume { + return &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + Mountpoint: v.Path(), } - - if len(m.Source) > 0 { - if _, err := os.Stat(m.Source); err != nil { - if !os.IsNotExist(err) { - return "", err - } - if err := system.MkdirAll(m.Source, 0755); err != nil { - return "", err - } - } - return m.Source, nil - } - - return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") } -// hasResource checks whether the given absolute path for a container is in -// this mount point. If the relative path starts with `../` then the resource -// is outside of this mount point, but we can't simply check for this prefix -// because it misses `..` which is also outside of the mount, so check both. -func (m *mountPoint) hasResource(absolutePath string) bool { - relPath, err := filepath.Rel(m.Destination, absolutePath) - - return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) -} - -func (m *mountPoint) Path() string { - if m.Volume != nil { - return m.Volume.Path() - } - - return m.Source -} - -func copyExistingContents(source, destination string) error { - volList, err := ioutil.ReadDir(source) +// createVolume creates a volume. +func (daemon *Daemon) createVolume(name, driverName string, opts map[string]string) (volume.Volume, error) { + v, err := daemon.volumes.Create(name, driverName, opts) if err != nil { - return err + return nil, err } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(destination) + daemon.volumes.Increment(v) + return v, nil +} + +// Len returns the number of mounts. Used in sorting. +func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. 
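The Len, Less, and Swap methods above (with parts, defined next) order mounts by the depth of their destination path, so a parent directory is always mounted before anything nested beneath it. The same idea in miniature, outside the daemon's types and with invented sample paths:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    	"sort"
    	"strings"
    )

    // byDepth sorts paths by the number of separators, so /etc sorts
    // before /etc/resolv.conf and cannot be shadowed by it.
    type byDepth []string

    func (b byDepth) Len() int      { return len(b) }
    func (b byDepth) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
    func (b byDepth) Less(i, j int) bool {
    	sep := string(os.PathSeparator)
    	return strings.Count(filepath.Clean(b[i]), sep) < strings.Count(filepath.Clean(b[j]), sep)
    }

    func main() {
    	dests := byDepth{"/etc/resolv.conf", "/etc", "/var/lib/docker"}
    	sort.Sort(dests)
    	fmt.Println(dests) // [/etc /etc/resolv.conf /var/lib/docker]
    }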
+func (m mounts) parts(i int) int {
+	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
+}
+
+// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
+// It follows this sequence to decide what to mount in each final destination:
+//
+// 1. Select the previously configured mount points for the containers, if any.
+// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destination.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
+	binds := map[string]bool{}
+	mountPoints := map[string]*volume.MountPoint{}
+
+	// 1. Read already configured mount points.
+	for name, point := range container.MountPoints {
+		mountPoints[name] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := volume.ParseVolumesFrom(v)
+		if err != nil {
+			return err
+		}
+
+		c, err := daemon.Get(containerID)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range c.MountPoints {
+			cp := &volume.MountPoint{
+				Name:        m.Name,
+				Source:      m.Source,
+				RW:          m.RW && volume.ReadWrite(mode),
+				Driver:      m.Driver,
+				Destination: m.Destination,
+			}
+
+			if len(cp.Source) == 0 {
+				v, err := daemon.createVolume(cp.Name, cp.Driver, nil)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+
+			mountPoints[cp.Destination] = cp
+		}
+	}
+
+	// 3.
Read bind mounts + for _, b := range hostConfig.Binds { + // #10618 + bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver) + if err != nil { + return err + } + + if binds[bind.Destination] { + return derr.ErrorCodeVolumeDup.WithArgs(bind.Destination) + } + + if len(bind.Name) > 0 && len(bind.Driver) > 0 { + // create the volume + v, err := daemon.createVolume(bind.Name, bind.Driver, nil) + if err != nil { + return err + } + bind.Volume = v + bind.Source = v.Path() + // bind.Name is an already existing volume, we need to use that here + bind.Driver = v.DriverName() + bind = setBindModeIfNull(bind) + } + shared := label.IsShared(bind.Mode) + if err := label.Relabel(bind.Source, container.MountLabel, shared); err != nil { + return err + } + binds[bind.Destination] = true + mountPoints[bind.Destination] = bind + } + + bcVolumes, bcVolumesRW := configureBackCompatStructures(daemon, container, mountPoints) + + container.Lock() + container.MountPoints = mountPoints + setBackCompatStructures(container, bcVolumes, bcVolumesRW) + + container.Unlock() + + return nil } diff --git a/vendor/github.com/docker/docker/daemon/volumes_linux_unit_test.go b/vendor/github.com/docker/docker/daemon/volumes_linux_unit_test.go deleted file mode 100644 index 842e101e..00000000 --- a/vendor/github.com/docker/docker/daemon/volumes_linux_unit_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build experimental - -package daemon - -import ( - "testing" - - "github.com/docker/docker/runconfig" - "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" -) - -type fakeDriver struct{} - -func (fakeDriver) Name() string { return "fake" } -func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil } -func (fakeDriver) Remove(v volume.Volume) error { return nil } - -func TestGetVolumeDriver(t *testing.T) { - _, err := getVolumeDriver("missing") - if err == nil { - t.Fatal("Expected error, was nil") - } - - volumedrivers.Register(fakeDriver{}, "fake") - d, err := getVolumeDriver("fake") - if err != nil { - t.Fatal(err) - } - if d.Name() != "fake" { - t.Fatalf("Expected fake driver, got %s\n", d.Name()) - } -} - -func TestParseBindMount(t *testing.T) { - cases := []struct { - bind string - driver string - expDest string - expSource string - expName string - expDriver string - mountLabel string - expRW bool - fail bool - }{ - {"/tmp:/tmp", "", "/tmp", "/tmp", "", "", "", true, false}, - {"/tmp:/tmp:ro", "", "/tmp", "/tmp", "", "", "", false, false}, - {"/tmp:/tmp:rw", "", "/tmp", "/tmp", "", "", "", true, false}, - {"/tmp:/tmp:foo", "", "/tmp", "/tmp", "", "", "", false, true}, - {"name:/tmp", "", "/tmp", "", "name", "local", "", true, false}, - {"name:/tmp", "external", "/tmp", "", "name", "external", "", true, false}, - {"name:/tmp:ro", "local", "/tmp", "", "name", "local", "", false, false}, - {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "local", "", true, false}, - } - - for _, c := range cases { - conf := &runconfig.Config{VolumeDriver: c.driver} - m, err := parseBindMount(c.bind, c.mountLabel, conf) - if c.fail { - if err == nil { - t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) - } - continue - } - - if m.Destination != c.expDest { - t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind) - } - - if m.Source != c.expSource { - t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind) - } - - if m.Name != c.expName { - t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind) - 
} - - if m.Driver != c.expDriver { - t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind) - } - - if m.RW != c.expRW { - t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind) - } - } -} diff --git a/vendor/github.com/docker/docker/daemon/volumes_unit_test.go b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go index b1e7f72f..dadf24e1 100644 --- a/vendor/github.com/docker/docker/daemon/volumes_unit_test.go +++ b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go @@ -1,11 +1,14 @@ package daemon -import "testing" +import ( + "github.com/docker/docker/volume" + "testing" +) -func TestParseVolumeFrom(t *testing.T) { +func TestParseVolumesFrom(t *testing.T) { cases := []struct { spec string - expId string + expID string expMode string fail bool }{ @@ -17,7 +20,7 @@ func TestParseVolumeFrom(t *testing.T) { } for _, c := range cases { - id, mode, err := parseVolumesFrom(c.spec) + id, mode, err := volume.ParseVolumesFrom(c.spec) if c.fail { if err == nil { t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) @@ -25,8 +28,8 @@ func TestParseVolumeFrom(t *testing.T) { continue } - if id != c.expId { - t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec) + if id != c.expID { + t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec) } if mode != c.expMode { t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go index d0a752a3..a7ac9911 100644 --- a/vendor/github.com/docker/docker/daemon/volumes_unix.go +++ b/vendor/github.com/docker/docker/daemon/volumes_unix.go @@ -3,7 +3,6 @@ package daemon import ( - "fmt" "io/ioutil" "os" "path/filepath" @@ -12,30 +11,54 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/system" - "github.com/docker/docker/runconfig" "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" + volumedrivers "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" - "github.com/opencontainers/runc/libcontainer/label" ) +// copyExistingContents copies from the source to the destination and +// ensures the ownership is appropriately set. +func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty copy files from the root into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + return copyOwnership(source, destination) +} + // copyOwnership copies the permissions and uid:gid of the source file -// into the destination file +// to the destination file func copyOwnership(source, destination string) error { stat, err := system.Stat(source) if err != nil { return err } - if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil { + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { return err } return os.Chmod(destination, os.FileMode(stat.Mode())) } -func (container *Container) setupMounts() ([]execdriver.Mount, error) { +// setupMounts iterates through each of the mount points for a container and +// calls Setup() on each. 
It also looks to see if it is a network mount such as
+// /etc/resolv.conf, and if it is not, appends it to the array of mounts.
+func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
 	var mounts []execdriver.Mount
 	for _, m := range container.MountPoints {
 		path, err := m.Setup()
@@ -52,81 +75,34 @@ func (container *Container) setupMounts() ([]execdriver.Mount, error) {
 	}
 
 	mounts = sortMounts(mounts)
-	return append(mounts, container.networkMounts()...), nil
-}
-
-func parseBindMount(spec string, mountLabel string, config *runconfig.Config) (*mountPoint, error) {
-	bind := &mountPoint{
-		RW: true,
-	}
-	arr := strings.Split(spec, ":")
-
-	switch len(arr) {
-	case 2:
-		bind.Destination = arr[1]
-	case 3:
-		bind.Destination = arr[1]
-		mode := arr[2]
-		isValid, isRw := volume.ValidateMountMode(mode)
-		if !isValid {
-			return nil, fmt.Errorf("invalid mode for volumes-from: %s", mode)
-		}
-		bind.RW = isRw
-		// Mode field is used by SELinux to decide whether to apply label
-		bind.Mode = mode
-	default:
-		return nil, fmt.Errorf("Invalid volume specification: %s", spec)
-	}
-
-	name, source, err := parseVolumeSource(arr[0])
-	if err != nil {
-		return nil, err
-	}
-
-	if len(source) == 0 {
-		bind.Driver = config.VolumeDriver
-		if len(bind.Driver) == 0 {
-			bind.Driver = volume.DefaultDriverName
-		}
-	} else {
-		bind.Source = filepath.Clean(source)
-	}
-
-	bind.Name = name
-	bind.Destination = filepath.Clean(bind.Destination)
-	return bind, nil
+	netMounts := container.networkMounts()
+	// if we are going to mount any of the network files from container
+	// metadata, the ownership must be set properly for potential container
+	// remapped root (user namespaces)
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	for _, mount := range netMounts {
+		if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
+			return nil, err
+		}
+	}
+	return append(mounts, netMounts...), nil
 }
 
+// sortMounts sorts an array of mounts in lexicographic order. This ensures that
+// when mounting, the mounts don't shadow other mounts. For example, if mounting
+// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first.
 func sortMounts(m []execdriver.Mount) []execdriver.Mount {
 	sort.Sort(mounts(m))
 	return m
 }
 
-type mounts []execdriver.Mount
-
-func (m mounts) Len() int {
-	return len(m)
-}
-
-func (m mounts) Less(i, j int) bool {
-	return m.parts(i) < m.parts(j)
-}
-
-func (m mounts) Swap(i, j int) {
-	m[i], m[j] = m[j], m[i]
-}
-
-func (m mounts) parts(i int) int {
-	return len(strings.Split(filepath.Clean(m[i].Destination), string(os.PathSeparator)))
-}
-
 // migrateVolume links the contents of a volume created pre Docker 1.7
 // into the location expected by the local driver.
 // It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data.
 // It preserves the volume json configuration generated pre Docker 1.7 to be able to
 // downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility.
 func migrateVolume(id, vfs string) error {
-	l, err := getVolumeDriver(volume.DefaultDriverName)
+	l, err := volumedrivers.Lookup(volume.DefaultDriverName)
 	if err != nil {
 		return err
 	}
@@ -184,12 +160,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
 			}
 			container.addLocalMountPoint(id, destination, rw)
 		} else { // Bind mount
-			id, source, err := parseVolumeSource(hostPath)
-			// We should not find an error here coming
-			// from the old configuration, but who knows.
- if err != nil { - return err - } + id, source := volume.ParseVolumeSource(hostPath) container.addBindMountPoint(id, source, destination, rw) } } @@ -197,7 +168,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error { // Volumes created with a Docker version >= 1.7. We verify integrity in case of data created // with Docker 1.7 RC versions that put the information in // DOCKER_ROOT/volumes/VOLUME_ID rather than DOCKER_ROOT/volumes/VOLUME_ID/_container_data. - l, err := getVolumeDriver(volume.DefaultDriverName) + l, err := volumedrivers.Lookup(volume.DefaultDriverName) if err != nil { return err } @@ -237,111 +208,25 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error { } } - return container.ToDisk() + return container.toDiskLocking() } return nil } -func parseVolumesFrom(spec string) (string, string, error) { - if len(spec) == 0 { - return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec) +// setBindModeIfNull is platform specific processing to ensure the +// shared mode is set to 'z' if it is null. This is called in the case +// of processing a named volume and not a typical bind. +func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint { + if bind.Mode == "" { + bind.Mode = "z" } - - specParts := strings.SplitN(spec, ":", 2) - id := specParts[0] - mode := "rw" - - if len(specParts) == 2 { - mode = specParts[1] - if isValid, _ := volume.ValidateMountMode(mode); !isValid { - return "", "", fmt.Errorf("invalid mode for volumes-from: %s", mode) - } - } - return id, mode, nil + return bind } -// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. -// It follows the next sequence to decide what to mount in each final destination: -// -// 1. Select the previously configured mount points for the containers, if any. -// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. -// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. -func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error { - binds := map[string]bool{} - mountPoints := map[string]*mountPoint{} - - // 1. Read already configured mount points. - for name, point := range container.MountPoints { - mountPoints[name] = point - } - - // 2. Read volumes from other containers. - for _, v := range hostConfig.VolumesFrom { - containerID, mode, err := parseVolumesFrom(v) - if err != nil { - return err - } - - c, err := daemon.Get(containerID) - if err != nil { - return err - } - - for _, m := range c.MountPoints { - cp := &mountPoint{ - Name: m.Name, - Source: m.Source, - RW: m.RW && volume.ReadWrite(mode), - Driver: m.Driver, - Destination: m.Destination, - } - - if len(cp.Source) == 0 { - v, err := createVolume(cp.Name, cp.Driver) - if err != nil { - return err - } - cp.Volume = v - } - - mountPoints[cp.Destination] = cp - } - } - - // 3. 
Read bind mounts - for _, b := range hostConfig.Binds { - // #10618 - bind, err := parseBindMount(b, container.MountLabel, container.Config) - if err != nil { - return err - } - - if binds[bind.Destination] { - return fmt.Errorf("Duplicate bind mount %s", bind.Destination) - } - - if len(bind.Name) > 0 && len(bind.Driver) > 0 { - // create the volume - v, err := createVolume(bind.Name, bind.Driver) - if err != nil { - return err - } - bind.Volume = v - bind.Source = v.Path() - // Since this is just a named volume and not a typical bind, set to shared mode `z` - if bind.Mode == "" { - bind.Mode = "z" - } - } - - if err := label.Relabel(bind.Source, container.MountLabel, bind.Mode); err != nil { - return err - } - binds[bind.Destination] = true - mountPoints[bind.Destination] = bind - } - +// configureBackCompatStructures is platform specific processing for +// registering mount points to populate old structures. +func configureBackCompatStructures(daemon *Daemon, container *Container, mountPoints map[string]*volume.MountPoint) (map[string]string, map[string]bool) { // Keep backwards compatible structures bcVolumes := map[string]string{} bcVolumesRW := map[string]bool{} @@ -349,52 +234,19 @@ func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runc if m.BackwardsCompatible() { bcVolumes[m.Destination] = m.Path() bcVolumesRW[m.Destination] = m.RW + + // This mountpoint is replacing an existing one, so the count needs to be decremented + if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { + daemon.volumes.Decrement(mp.Volume) + } } } + return bcVolumes, bcVolumesRW +} - container.Lock() - container.MountPoints = mountPoints +// setBackCompatStructures is a platform specific helper function to set +// backwards compatible structures in the container when registering volumes. +func setBackCompatStructures(container *Container, bcVolumes map[string]string, bcVolumesRW map[string]bool) { container.Volumes = bcVolumes container.VolumesRW = bcVolumesRW - container.Unlock() - - return nil -} - -func createVolume(name, driverName string) (volume.Volume, error) { - vd, err := getVolumeDriver(driverName) - if err != nil { - return nil, err - } - return vd.Create(name) -} - -func removeVolume(v volume.Volume) error { - vd, err := getVolumeDriver(v.DriverName()) - if err != nil { - return nil - } - return vd.Remove(v) -} - -func getVolumeDriver(name string) (volume.Driver, error) { - if name == "" { - name = volume.DefaultDriverName - } - return volumedrivers.Lookup(name) -} - -func parseVolumeSource(spec string) (string, string, error) { - if !filepath.IsAbs(spec) { - return spec, "", nil - } - - return "", spec, nil -} - -// BackwardsCompatible decides whether this mount point can be -// used in old versions of Docker or not. -// Only bind mounts and local volumes can be used in old versions of Docker. 
-func (m *mountPoint) BackwardsCompatible() bool {
-	return len(m.Source) > 0 || m.Driver == volume.DefaultDriverName
 }
diff --git a/vendor/github.com/docker/docker/daemon/volumes_windows.go b/vendor/github.com/docker/docker/daemon/volumes_windows.go
index 40db7eda..73ec1719 100644
--- a/vendor/github.com/docker/docker/daemon/volumes_windows.go
+++ b/vendor/github.com/docker/docker/daemon/volumes_windows.go
@@ -3,17 +3,37 @@
 package daemon
 
 import (
+	"sort"
+
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/runconfig"
+	derr "github.com/docker/docker/errors"
+	"github.com/docker/docker/volume"
 )
 
-// Not supported on Windows
-func copyOwnership(source, destination string) error {
-	return nil
-}
+// setupMounts configures the mount points for a container by appending each
+// of the configured mounts on the container to the execdriver mount structure
+// which will ultimately be passed into the exec driver during container creation.
+// It also ensures each of the mounts is lexicographically sorted.
+func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
+	var mnts []execdriver.Mount
+	for _, mount := range container.MountPoints { // type is volume.MountPoint
+		// If there is no source, take it from the volume path
+		s := mount.Source
+		if s == "" && mount.Volume != nil {
+			s = mount.Volume.Path()
+		}
+		if s == "" {
+			return nil, derr.ErrorCodeVolumeNoSourceForMount.WithArgs(mount.Name, mount.Driver, mount.Destination)
+		}
+		mnts = append(mnts, execdriver.Mount{
+			Source:      s,
+			Destination: mount.Destination,
+			Writable:    mount.RW,
+		})
+	}
 
-func (container *Container) setupMounts() ([]execdriver.Mount, error) {
-	return nil, nil
+	sort.Sort(mounts(mnts))
+	return mnts, nil
 }
 
 // verifyVolumesInfo ports volumes configured for the containers pre docker 1.7.
@@ -22,7 +42,20 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
 	return nil
 }
 
-// TODO Windows: This can be further factored out. Called from daemon\daemon.go
-func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
-	return nil
+// setBindModeIfNull is platform specific processing which is a no-op on
+// Windows.
+func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint {
+	return bind
+}
+
+// configureBackCompatStructures is platform specific processing for
+// registering mount points to populate old structures. This is a no-op on Windows.
+func configureBackCompatStructures(*Daemon, *Container, map[string]*volume.MountPoint) (map[string]string, map[string]bool) {
+	return nil, nil
+}
+
+// setBackCompatStructures is a platform specific helper function to set
+// backwards compatible structures in the container when registering volumes.
+// This is a no-op on Windows.
+func setBackCompatStructures(*Container, map[string]string, map[string]bool) {
 }
diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go
index 1101b2f0..e429c460 100644
--- a/vendor/github.com/docker/docker/daemon/wait.go
+++ b/vendor/github.com/docker/docker/daemon/wait.go
@@ -2,6 +2,11 @@ package daemon
 
 import "time"
 
+// ContainerWait stops processing until the given container is
+// stopped. If the container is not found, an error is returned. On a
+// successful stop, the exit code of the container is returned. On a
+// timeout, an error is returned. If you want to wait forever, supply
+// a negative duration for the timeout.
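+//
+// For example, a caller might wait at most ten seconds for a container to
+// stop (hypothetical caller code, shown only to illustrate the signature):
+//
+//	exitCode, err := daemon.ContainerWait("mycontainer", 10*time.Second)
+//	if err != nil {
+//		// the container was not found, or the timeout expired first
+//	}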
 func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
 	container, err := daemon.Get(name)
 	if err != nil {
diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go
new file mode 100644
index 00000000..1d5862d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go
@@ -0,0 +1,16 @@
+// +build !autogen
+
+// Package dockerversion is auto-generated at build-time
+package dockerversion
+
+// Default build-time variable for library-import.
+// This file is overridden on build with build-time information.
+const (
+	GitCommit string = "library-import"
+	Version   string = "library-import"
+	BuildTime string = "library-import"
+
+	IAmStatic string = "library-import"
+	InitSHA1  string = "library-import"
+	InitPath  string = "library-import"
+)
diff --git a/vendor/github.com/docker/docker/errors/README.md b/vendor/github.com/docker/docker/errors/README.md
new file mode 100644
index 00000000..81fa04cc
--- /dev/null
+++ b/vendor/github.com/docker/docker/errors/README.md
@@ -0,0 +1,58 @@
+Docker 'errors' package
+=======================
+
+This package contains all of the error messages generated by the Docker
+engine that might be exposed via the Docker engine's REST API.
+
+Each top-level engine package will have its own file in this directory
+so that there's a clear grouping of errors, instead of just one big
+file. The errors for each package are defined here instead of within
+their respective package structure so that Docker CLI code that may need
+to import these error definition files will not need to know or understand
+the engine's package/directory structure. In other words, all they should
+need to do is import `.../docker/errors` and they will automatically
+pick up all Docker engine defined errors. This also gives the engine
+developers the freedom to change the engine packaging structure (e.g. to
+CRUD packages) without worrying about breaking existing clients.
+
+These errors are defined using the 'errcode' package. The `errcode` package
+allows for each error to be typed and include all information necessary to
+have further processing done on them if necessary. In particular, each error
+includes:
+
+* Value - a unique string (in all caps) associated with this error.
+Typically, this string is the same name as the variable name of the error
+(w/o the `ErrorCode` text) but in all caps.
+
+* Message - the human readable sentence that will be displayed for this
+error. It can contain '%s' substitutions that allow for the code generating
+the error to specify values that will be inserted in the string prior to
+being displayed to the end-user. The `WithArgs()` function can be used to
+specify the insertion strings. Note, the evaluation of the strings will be
+done at the time `WithArgs()` is called.
+
+* Description - additional human readable text to further explain the
+circumstances of the error situation.
+
+* HTTPStatusCode - when the error is returned back to a CLI, this value
+will be used to populate the HTTP status code. If not present the default
+value will be `StatusInternalServerError`, 500.
+
+Not all errors generated within the engine's executable will be propagated
+back to the engine's API layer. For example, it is expected that errors
+generated by vendored code (under `docker/vendor`) and packaged code
+(under `docker/pkg`) will be converted into errors defined by this package.
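+
+For example, a new error is defined by registering an `ErrorDescriptor`
+with the `errcode` package (the error shown here is hypothetical and only
+illustrates the pattern; `errGroup` is this package's group constant,
+"engine"):
+
+```
+ErrorCodeExample = errcode.Register(errGroup, errcode.ErrorDescriptor{
+	Value:          "EXAMPLE",
+	Message:        "example operation failed: %s",
+	Description:    "A hypothetical error illustrating the descriptor fields",
+	HTTPStatusCode: http.StatusInternalServerError,
+})
+
+// later, at the point where the error occurs:
+return ErrorCodeExample.WithArgs("some detail")
+```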
+
+When processing an errcode error, if you are looking for a particular
+error then you can do something like:
+
+```
+import derr "github.com/docker/docker/errors"
+
+...
+
+err := someFunc()
+if err.ErrorCode() == derr.ErrorCodeNoSuchContainer {
+	...
+}
+```
diff --git a/vendor/github.com/docker/docker/errors/builder.go b/vendor/github.com/docker/docker/errors/builder.go
new file mode 100644
index 00000000..38d0d3c3
--- /dev/null
+++ b/vendor/github.com/docker/docker/errors/builder.go
@@ -0,0 +1,93 @@
+package errors
+
+// This file contains all of the errors that can be generated from the
+// docker/builder component.
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+var (
+	// ErrorCodeAtLeastOneArg is generated when the parser comes across a
+	// Dockerfile command that doesn't have any args.
+	ErrorCodeAtLeastOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "ATLEASTONEARG",
+		Message:        "%s requires at least one argument",
+		Description:    "The specified command requires at least one argument",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeExactlyOneArg is generated when the parser comes across a
+	// Dockerfile command that requires exactly one arg but got less/more.
+	ErrorCodeExactlyOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXACTLYONEARG",
+		Message:        "%s requires exactly one argument",
+		Description:    "The specified command requires exactly one argument",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeAtLeastTwoArgs is generated when the parser comes across a
+	// Dockerfile command that requires at least two args but got less.
+	ErrorCodeAtLeastTwoArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "ATLEASTTWOARGS",
+		Message:        "%s requires at least two arguments",
+		Description:    "The specified command requires at least two arguments",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeTooManyArgs is generated when the parser comes across a
+	// Dockerfile command that has more args than it should.
+	ErrorCodeTooManyArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "TOOMANYARGS",
+		Message:        "Bad input to %s, too many args",
+		Description:    "The specified command was passed too many arguments",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeChainOnBuild is generated when the parser comes across a
+	// Dockerfile command that is trying to chain ONBUILD commands.
+	ErrorCodeChainOnBuild = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CHAINONBUILD",
+		Message:        "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
+		Description:    "ONBUILD Dockerfile commands aren't allowed on ONBUILD commands",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeBadOnBuildCmd is generated when the parser comes across
+	// an ONBUILD Dockerfile command with an invalid trigger/command.
+	ErrorCodeBadOnBuildCmd = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "BADONBUILDCMD",
+		Message:        "%s isn't allowed as an ONBUILD trigger",
+		Description:    "The specified ONBUILD command isn't allowed",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeMissingFrom is generated when the Dockerfile is missing
+	// a FROM command.
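+	// Every build must name a base image with a FROM instruction before
+	// any other instruction can run.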
+	ErrorCodeMissingFrom = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "MISSINGFROM",
+		Message:        "Please provide a source image with `from` prior to run",
+		Description:    "The Dockerfile is missing a FROM command",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNotOnWindows is generated when the specified Dockerfile
+	// command is not supported on Windows.
+	ErrorCodeNotOnWindows = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOTONWINDOWS",
+		Message:        "%s is not supported on Windows",
+		Description:    "The specified Dockerfile command is not supported on Windows",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeEmpty is generated when the specified Volume string
+	// is empty.
+	ErrorCodeVolumeEmpty = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEEMPTY",
+		Message:        "Volume specified can not be an empty string",
+		Description:    "The specified volume can not be an empty string",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+)
diff --git a/vendor/github.com/docker/docker/errors/daemon.go b/vendor/github.com/docker/docker/errors/daemon.go
new file mode 100644
index 00000000..d406d271
--- /dev/null
+++ b/vendor/github.com/docker/docker/errors/daemon.go
@@ -0,0 +1,951 @@
+package errors
+
+// This file contains all of the errors that can be generated from the
+// docker/daemon component.
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+var (
+	// ErrorCodeNoSuchContainer is generated when we look for a container by
+	// name or ID and we can't find it.
+	ErrorCodeNoSuchContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOSUCHCONTAINER",
+		Message:        "no such id: %s",
+		Description:    "The specified container can not be found",
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeUnregisteredContainer is generated when we try to load
+	// a storage driver for an unregistered container
+	ErrorCodeUnregisteredContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "UNREGISTEREDCONTAINER",
+		Message:        "Can't load storage driver for unregistered container %s",
+		Description:    "An attempt was made to load the storage driver for a container that is not registered with the daemon",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeContainerBeingRemoved is generated when an attempt to start
+	// a container is made but it's in the process of being removed, or is dead.
+	ErrorCodeContainerBeingRemoved = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CONTAINERBEINGREMOVED",
+		Message:        "Container is marked for removal and cannot be started.",
+		Description:    "An attempt was made to start a container that is in the process of being deleted",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeUnpauseContainer is generated when we attempt to stop a
+	// container but it's paused.
+	ErrorCodeUnpauseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "UNPAUSECONTAINER",
+		Message:        "Container %s is paused. Unpause the container before stopping",
+		Description:    "The specified container is paused, before it can be stopped it must be unpaused",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodePausedContainer is generated when we attempt to attach a
+	// container but it's paused.
+	ErrorCodePausedContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CONTAINERPAUSED",
+		Message:        "Container %s is paused. Unpause the container before attach",
+		Description:    "The specified container is paused, unpause the container before attach",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeAlreadyPaused is generated when we attempt to pause a
+	// container when it's already paused.
+	ErrorCodeAlreadyPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "ALREADYPAUSED",
+		Message:        "Container %s is already paused",
+		Description:    "The specified container is already in the paused state",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNotPaused is generated when we attempt to unpause a
+	// container when it's not paused.
+	ErrorCodeNotPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOTPAUSED",
+		Message:        "Container %s is not paused",
+		Description:    "The specified container can not be unpaused because it is not in a paused state",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeImageUnregContainer is generated when we attempt to get the
+	// image of an unknown/unregistered container.
+	ErrorCodeImageUnregContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "IMAGEUNREGCONTAINER",
+		Message:        "Can't get image of unregistered container",
+		Description:    "An attempt to retrieve the image of a container was made but the container is not registered",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeEmptyID is generated when an ID is the empty string.
+	ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EMPTYID",
+		Message:        "Invalid empty id",
+		Description:    "An attempt was made to register a container but the container's ID can not be an empty string",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeLoggingFactory is generated when we could not load the
+	// log driver.
+	ErrorCodeLoggingFactory = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "LOGGINGFACTORY",
+		Message:        "Failed to get logging factory: %v",
+		Description:    "There was an error while trying to retrieve the factory for the container's logging driver",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeInitLogger is generated when we could not initialize
+	// the logging driver.
+	ErrorCodeInitLogger = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "INITLOGGER",
+		Message:        "Failed to initialize logging driver: %v",
+		Description:    "An error occurred while trying to initialize the logging driver",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNotRunning is generated when we need to verify that
+	// a container is running, but it's not.
+	ErrorCodeNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOTRUNNING",
+		Message:        "Container %s is not running",
+		Description:    "The specified action can not be taken due to the container not being in a running state",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeLinkNotRunning is generated when we try to link to a
+	// container that is not running.
+	ErrorCodeLinkNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "LINKNOTRUNNING",
+		Message:        "Cannot link to a non running container: %s AS %s",
+		Description:    "An attempt was made to link to a container but the container is not in a running state",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeDeviceInfo is generated when there is an error while trying
+	// to get info about a custom device.
+	ErrorCodeDeviceInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "DEVICEINFO",
+		Message:        "error gathering device information while adding custom device %q: %s",
+		Description:    "There was an error while trying to retrieve the information about a custom device",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeEmptyEndpoint is generated when the endpoint for a port
+	// map is nil.
+	ErrorCodeEmptyEndpoint = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EMPTYENDPOINT",
+		Message:        "invalid endpoint while building port map info",
+		Description:    "The specified endpoint for the port mapping is empty",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeEmptyNetwork is generated when the networkSettings for a port
+	// map is nil.
+	ErrorCodeEmptyNetwork = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EMPTYNETWORK",
+		Message:        "invalid networksettings while building port map info",
+		Description:    "The specified network settings for the port mapping are empty",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeParsingPort is generated when there is an error parsing
+	// a "port" string.
+	ErrorCodeParsingPort = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "PARSINGPORT",
+		Message:        "Error parsing Port value(%v):%v",
+		Description:    "There was an error while trying to parse the specified 'port' value",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNoSandbox is generated when we can't find the specified
+	// sandbox(network) by ID.
+	ErrorCodeNoSandbox = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOSANDBOX",
+		Message:        "error locating sandbox id %s: %v",
+		Description:    "There was an error trying to locate the specified networking sandbox",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNetworkUpdate is generated when there is an error while
+	// trying to update a network/sandbox config.
+	ErrorCodeNetworkUpdate = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NETWORKUPDATE",
+		Message:        "Update network failed: %v",
+		Description:    "There was an error trying to update the configuration information of the specified network sandbox",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNetworkRefresh is generated when there is an error while
+	// trying to refresh a network/sandbox config.
+	ErrorCodeNetworkRefresh = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NETWORKREFRESH",
+		Message:        "Update network failed: Failure in refresh sandbox %s: %v",
+		Description:    "There was an error trying to refresh the configuration information of the specified network sandbox",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeHostPort is generated when there was an error while trying
+	// to parse a "host/port" string.
+	ErrorCodeHostPort = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "HOSTPORT",
+		Message:        "Error parsing HostPort value(%s):%v",
+		Description:    "There was an error trying to parse the specified 'HostPort' value",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNetworkConflict is generated when we try to publish a service
+	// in network mode.
+	ErrorCodeNetworkConflict = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NETWORKCONFLICT",
+		Message:        "conflicting options: publishing a service and network mode",
+		Description:    "It is not possible to publish a service when it is in network mode",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeJoinInfo is generated when we failed to update a container's
+	// join info.
+	ErrorCodeJoinInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "JOININFO",
+		Message:        "Updating join info failed: %v",
+		Description:    "There was an error during an attempt to update a container's join information",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeIPCRunning is generated when we try to join a container's
+	// IPC but it's not running.
+	ErrorCodeIPCRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "IPCRUNNING",
+		Message:        "cannot join IPC of a non running container: %s",
+		Description:    "An attempt was made to join the IPC of a container, but the container is not running",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNotADir is generated when we try to create a directory
+	// but the path isn't a dir.
+	ErrorCodeNotADir = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOTADIR",
+		Message:        "Cannot mkdir: %s is not a directory",
+		Description:    "An attempt was made to create a directory, but the location in which it is being created is not a directory",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeParseContainer is generated when the reference to a
+	// container doesn't include a ":" (another container).
+	ErrorCodeParseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "PARSECONTAINER",
+		Message:        "no container specified to join network",
+		Description:    "The specified reference to a container is missing a ':' as a separator between 'container' and 'name'/'id'",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeJoinSelf is generated when we try to network to ourselves.
+	ErrorCodeJoinSelf = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "JOINSELF",
+		Message:        "cannot join own network",
+		Description:    "An attempt was made to have a container join its own network",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeJoinRunning is generated when we try to join the network
+	// of a non-running container.
+	ErrorCodeJoinRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "JOINRUNNING",
+		Message:        "cannot join network of a non running container: %s",
+		Description:    "An attempt to join the network of a container, but that container isn't running",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeModeNotContainer is generated when we try to network to
+	// another container but the mode isn't 'container'.
+	ErrorCodeModeNotContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "MODENOTCONTAINER",
+		Message:        "network mode not set to container",
+		Description:    "An attempt was made to connect to a container's network but the mode wasn't set to 'container'",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRemovingVolume is generated when we try to remove a mount
+	// point (volume) but fail.
+	ErrorCodeRemovingVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "REMOVINGVOLUME",
+		Message:        "Error removing volumes:\n%v",
+		Description:    "There was an error while trying to remove the mount point (volume) of a container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeInvalidNetworkMode is generated when an invalid network
+	// mode value is specified.
+	ErrorCodeInvalidNetworkMode = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "INVALIDNETWORKMODE",
+		Message:        "invalid network mode: %s",
+		Description:    "The specified networking mode is not valid",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeGetGraph is generated when there was an error while
+	// trying to find a graph/image.
+	ErrorCodeGetGraph = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "GETGRAPH",
+		Message:        "Failed to graph.Get on ImageID %s - %s",
+		Description:    "There was an error trying to retrieve the image for the specified image ID",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeGetLayer is generated when there was an error while
+	// trying to retrieve a particular layer of an image.
+	ErrorCodeGetLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "GETLAYER",
+		Message:        "Failed to get layer path from graphdriver %s for ImageID %s - %s",
+		Description:    "There was an error trying to retrieve the layer of the specified image",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodePutLayer is generated when there was an error while
+	// trying to 'put' a particular layer of an image.
+	ErrorCodePutLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "PUTLAYER",
+		Message:        "Failed to put layer path from graphdriver %s for ImageID %s - %s",
+		Description:    "There was an error trying to store a layer for the specified image",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeGetLayerMetadata is generated when there was an error while
+	// trying to retrieve the metadata of a layer of an image.
+	ErrorCodeGetLayerMetadata = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "GETLAYERMETADATA",
+		Message:        "Failed to get layer metadata - %s",
+		Description:    "There was an error trying to retrieve the metadata of a layer for the specified image",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeEmptyConfig is generated when the input config data
+	// is empty.
+	ErrorCodeEmptyConfig = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EMPTYCONFIG",
+		Message:        "Config cannot be empty in order to create a container",
+		Description:    "While trying to create a container, the specified configuration information was empty",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNoSuchImageHash is generated when we can't find the
+	// specified image by its hash
+	ErrorCodeNoSuchImageHash = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOSUCHIMAGEHASH",
+		Message:        "No such image: %s",
+		Description:    "An attempt was made to find an image by its hash, but the lookup failed",
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeNoSuchImageTag is generated when we can't find the
+	// specified image by its name/tag.
+	ErrorCodeNoSuchImageTag = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOSUCHIMAGETAG",
+		Message:        "No such image: %s:%s",
+		Description:    "An attempt was made to find an image by its name/tag, but the lookup failed",
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeMountOverFile is generated when we try to mount a volume
+	// over an existing file (but not a dir).
+	ErrorCodeMountOverFile = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "MOUNTOVERFILE",
+		Message:        "cannot mount volume over existing file, file exists %s",
+		Description:    "An attempt was made to mount a volume at the same location as a pre-existing file",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeMountSetup is generated when we can't define a mount point
+	// due to the source and destination being undefined.
+	ErrorCodeMountSetup = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "MOUNTSETUP",
+		Message:        "Unable to setup mount point, neither source nor volume defined",
+		Description:    "An attempt was made to setup a mount point, but the source and destination are undefined",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeInvalidMode is generated when the mode of a volume/bind
+	// mount is invalid.
+	ErrorCodeVolumeInvalidMode = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEINVALIDMODE",
+		Message:        "invalid mode: %q",
+		Description:    "An invalid 'mode' was specified",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeInvalid is generated when the format of the
+	// volume specification isn't valid.
+	ErrorCodeVolumeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEINVALID",
+		Message:        "Invalid volume specification: '%s'",
+		Description:    "An invalid 'volume' was specified in the mount request",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeAbs is generated when the path to a volume isn't absolute.
+	ErrorCodeVolumeAbs = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEABS",
+		Message:        "Invalid volume destination path: '%s' mount path must be absolute.",
+		Description:    "An invalid 'destination' path was specified in the mount request, it must be an absolute path",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeName is generated when the name of a named volume isn't valid.
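+	// The message reports the offending name together with the set of
+	// characters a local volume name is allowed to contain.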
+	ErrorCodeVolumeName = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUME_NAME_INVALID",
+		Message:        "%q includes invalid characters for a local volume name, only %q are allowed",
+		Description:    "The name of the volume is invalid",
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeVolumeSlash is generated when the destination path to a volume is /
+	ErrorCodeVolumeSlash = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMESLASH",
+		Message:        "Invalid specification: destination can't be '/' in '%s'",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeDestIsC is generated when the destination is c: (Windows specific)
+	ErrorCodeVolumeDestIsC = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEDESTISC",
+		Message:        "Destination drive letter in '%s' cannot be c:",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeDestIsCRoot is generated when the destination path is c:\ (Windows specific)
+	ErrorCodeVolumeDestIsCRoot = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEDESTISCROOT",
+		Message:        `Destination path in '%s' cannot be c:\`,
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeSourceNotFound is generated when the source directory could not be found (Windows specific)
+	ErrorCodeVolumeSourceNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMESOURCENOTFOUND",
+		Message:        "Source directory '%s' could not be found: %s",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeSourceNotDirectory is generated when the source is not a directory (Windows specific)
+	ErrorCodeVolumeSourceNotDirectory = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMESOURCENOTDIRECTORY",
+		Message:        "Source '%s' is not a directory",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeFromBlank is generated when the path to a volume is blank.
+	ErrorCodeVolumeFromBlank = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEFROMBLANK",
+		Message:        "malformed volumes-from specification: %q",
+		Description:    "An invalid 'destination' path was specified in the mount request, it must not be blank",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeDup is generated when we try to mount two volumes
+	// to the same path.
+	ErrorCodeVolumeDup = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMEDUP",
+		Message:        "Duplicate bind mount '%s'",
+		Description:    "An attempt was made to mount a volume but the specified destination location is already used in a previous mount",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeNoSourceForMount is generated when no source directory
+	// for a volume mount was found. (Windows specific)
+	ErrorCodeVolumeNoSourceForMount = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMENOSOURCEFORMOUNT",
+		Message:        "No source for mount name '%s' driver %q destination '%s'",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeVolumeNameReservedWord is generated when the name in a volume
+	// uses a reserved word for filenames. (Windows specific)
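+	// Windows reserves certain device names (for example CON, PRN, AUX,
+	// NUL, COM1-COM9 and LPT1-LPT9) that cannot be used as file or volume names.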
+	ErrorCodeVolumeNameReservedWord = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUMENAMERESERVEDWORD",
+		Message:        "Volume name %q cannot be a reserved word for Windows filenames",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeCantUnpause is generated when there's an error while trying
+	// to unpause a container.
+	ErrorCodeCantUnpause = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CANTUNPAUSE",
+		Message:        "Cannot unpause container %s: %s",
+		Description:    "An error occurred while trying to unpause the specified container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodePSError is generated when trying to run 'ps'.
+	ErrorCodePSError = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "PSError",
+		Message:        "Error running ps: %s",
+		Description:    "There was an error trying to run the 'ps' command in the specified container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNoPID is generated when looking for the PID field in the
+	// ps output.
+	ErrorCodeNoPID = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOPID",
+		Message:        "Couldn't find PID field in ps output",
+		Description:    "There was no 'PID' field in the output of the 'ps' command that was executed",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeBadPID is generated when we can't convert a PID to an int.
+	ErrorCodeBadPID = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "BADPID",
+		Message:        "Unexpected pid '%s': %s",
+		Description:    "While trying to parse the output of the 'ps' command, the 'PID' field was not an integer",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNoTop is generated when we try to run 'top' but can't
+	// because we're on Windows.
+	ErrorCodeNoTop = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOTOP",
+		Message:        "Top is not supported on Windows",
+		Description:    "The 'top' command is not supported on Windows",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeStopped is generated when we try to stop a container
+	// that is already stopped.
+	ErrorCodeStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "STOPPED",
+		Message:        "Container already stopped",
+		Description:    "An attempt was made to stop a container, but the container is already stopped",
+		HTTPStatusCode: http.StatusNotModified,
+	})
+
+	// ErrorCodeCantStop is generated when we try to stop a container
+	// but failed for some reason.
+	ErrorCodeCantStop = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CANTSTOP",
+		Message:        "Cannot stop container %s: %s\n",
+		Description:    "An error occurred while trying to stop the specified container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeBadCPUFields is generated when the number of CPU fields is
+	// less than 8.
+	ErrorCodeBadCPUFields = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "BADCPUFIELDS",
+		Message:        "invalid number of cpu fields",
+		Description:    "While reading the '/proc/stat' file, the number of 'cpu' fields is less than 8",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeBadCPUInt is generated when the CPU field can't be parsed as an int.
+	ErrorCodeBadCPUInt = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "BADCPUINT",
+		Message:        "Unable to convert value %s to int: %s",
+		Description:    "While reading the '/proc/stat' file, the 'CPU' field could not be parsed as an integer",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeBadStatFormat is generated when the output of the stat info
+	// isn't parseable.
+	ErrorCodeBadStatFormat = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "BADSTATFORMAT",
+		Message:        "invalid stat format",
+		Description:    "There was an error trying to parse the '/proc/stat' file",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeTimedOut is generated when a timer expires.
+	ErrorCodeTimedOut = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "TIMEDOUT",
+		Message:        "Timed out: %v",
+		Description:    "A timer expired",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeAlreadyRemoving is generated when we try to remove a
+	// container that is already being removed.
+	ErrorCodeAlreadyRemoving = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "ALREADYREMOVING",
+		Message:        "Status is already RemovalInProgress",
+		Description:    "An attempt to remove a container was made, but the container is already in the process of being removed",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeStartPaused is generated when we start a paused container.
+	ErrorCodeStartPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "STARTPAUSED",
+		Message:        "Cannot start a paused container, try unpause instead.",
+		Description:    "An attempt to start a container was made, but the container is paused. Unpause it first",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeAlreadyStarted is generated when we try to start a container
+	// that is already running.
+	ErrorCodeAlreadyStarted = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "ALREADYSTARTED",
+		Message:        "Container already started",
+		Description:    "An attempt to start a container was made, but the container is already started",
+		HTTPStatusCode: http.StatusNotModified,
+	})
+
+	// ErrorCodeHostConfigStart is generated when a HostConfig is passed
+	// into the start command.
+	ErrorCodeHostConfigStart = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "HOSTCONFIGSTART",
+		Message:        "Supplying a hostconfig on start is not supported. It should be supplied on create",
+		Description:    "The 'start' command does not accept 'HostConfig' data, try using the 'create' command instead",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeCantRestart is generated when an error occurred while
+	// trying to restart a container.
+	ErrorCodeCantRestart = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CANTRESTART",
+		Message:        "Cannot restart container %s: %s",
+		Description:    "There was an error while trying to restart a container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeEmptyRename is generated when one of the names on a
+	// rename is empty.
+	ErrorCodeEmptyRename = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EMPTYRENAME",
+		Message:        "Neither old nor new names may be empty",
+		Description:    "An attempt was made to rename a container but either the old or new names were blank",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRenameTaken is generated when we try to rename but the
+	// new name isn't available.
+	ErrorCodeRenameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RENAMETAKEN",
+		Message:        "Error when allocating new name: %s",
+		Description:    "The new name specified on the 'rename' command is already being used",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRenameDelete is generated when we try to rename but
+	// failed trying to delete the old container.
+	ErrorCodeRenameDelete = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RENAMEDELETE",
+		Message:        "Failed to delete container %q: %v",
+		Description:    "There was an error trying to delete the specified container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodePauseError is generated when we try to pause a container
+	// but failed.
+	ErrorCodePauseError = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "PAUSEERROR",
+		Message:        "Cannot pause container %s: %s",
+		Description:    "There was an error trying to pause the specified container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNeedStream is generated when we try to stream a container's
+	// logs but no output stream was specified.
+	ErrorCodeNeedStream = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NEEDSTREAM",
+		Message:        "You must choose at least one stream",
+		Description:    "While trying to stream a container's logs, no output stream was specified",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeDanglingOne is generated when we try to specify more than one
+	// 'dangling' specifier.
+	ErrorCodeDanglingOne = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "DANLGINGONE",
+		Message:        "Conflict: cannot use more than 1 value for `dangling` filter",
+		Description:    "The specified 'dangling' filter may not have more than one value",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeImgDelUsed is generated when we try to delete an image
+	// but it is being used.
+	ErrorCodeImgDelUsed = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "IMGDELUSED",
+		Message:        "conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s",
+		Description:    "An attempt was made to delete an image but it is currently being used",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeImgNoParent is generated when we try to find an image's
+	// parent but it's not in the graph.
+	ErrorCodeImgNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "IMGNOPARENT",
+		Message:        "unable to get parent image: %v",
+		Description:    "There was an error trying to find an image's parent, it was not in the graph",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeExportFailed is generated when an export fails.
+	ErrorCodeExportFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXPORTFAILED",
+		Message:        "%s: %s",
+		Description:    "There was an error during an export operation",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeExecResize is generated when we try to resize an exec
+	// but it's not running.
+	ErrorCodeExecResize = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXECRESIZE",
+		Message:        "Exec %s is not running, so it can not be resized.",
+		Description:    "An attempt was made to resize an 'exec', but the 'exec' is not running",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeContainerNotRunning is generated when we try to get the info
+	// on an exec but the container is not running.
+	ErrorCodeContainerNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CONTAINERNOTRUNNING",
+		Message:        "Container %s is not running: %s",
+		Description:    "An attempt was made to retrieve the information about an 'exec' but the container is not running",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeNoExecID is generated when we try to get the info
+	// on an exec but it can't be found.
+	ErrorCodeNoExecID = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOEXECID",
+		Message:        "No such exec instance '%s' found in daemon",
+		Description:    "The specified 'exec' instance could not be found",
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeExecPaused is generated when we try to start an exec
+	// but the container is paused.
+	ErrorCodeExecPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXECPAUSED",
+		Message:        "Container %s is paused, unpause the container before exec",
+		Description:    "An attempt to start an 'exec' was made, but the owning container is paused",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeExecRunning is generated when we try to start an exec
+	// but it's already running.
+	ErrorCodeExecRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXECRUNNING",
+		Message:        "Error: Exec command %s is already running",
+		Description:    "An attempt to start an 'exec' was made, but 'exec' is already running",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeExecCantRun is generated when we try to start an exec
+	// but it failed for some reason.
+	ErrorCodeExecCantRun = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXECCANTRUN",
+		Message:        "Cannot run exec command %s in container %s: %s",
+		Description:    "An attempt to start an 'exec' was made, but an error occurred",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeExecAttach is generated when we try to attach to an exec
+	// but failed.
+	ErrorCodeExecAttach = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXECATTACH",
+		Message:        "attach failed with error: %s",
+		Description:    "There was an error while trying to attach to an 'exec'",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeExecContainerStopped is generated when we try to start
+	// an exec but then the container stopped.
+	ErrorCodeExecContainerStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "EXECCONTAINERSTOPPED",
+		Message:        "container stopped while running exec",
+		Description:    "An attempt was made to start an 'exec' but the owning container is in the 'stopped' state",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeDefaultName is generated when we try to delete the
+	// default name of a container.
+	ErrorCodeDefaultName = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "DEFAULTNAME",
+		Message:        "Conflict, cannot remove the default name of the container",
+		Description:    "An attempt to delete the default name of a container was made, but that is not allowed",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeNoParent is generated when we try to delete a container
+	// but we can't find its parent image.
+	ErrorCodeNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOPARENT",
+		Message:        "Cannot get parent %s for name %s",
+		Description:    "An attempt was made to delete a container but its parent image could not be found",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeCantDestroy is generated when we try to delete a container
+	// but failed for some reason.
+	ErrorCodeCantDestroy = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CANTDESTROY",
+		Message:        "Cannot destroy container %s: %v",
+		Description:    "An attempt was made to delete a container but it failed",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmRunning is generated when we try to delete a container
+	// but it's still running.
+	ErrorCodeRmRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMRUNNING",
+		Message:        "Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f",
+		Description:    "An attempt was made to delete a container but the container is still running, try to either stop it first or use '-f'",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeRmFailed is generated when we try to delete a container
+	// but it failed for some reason.
+	ErrorCodeRmFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMFAILED",
+		Message:        "Could not kill running container, cannot remove - %v",
+		Description:    "An error occurred while trying to delete a running container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmNotFound is generated when we try to delete a container
+	// but couldn't find it.
+	ErrorCodeRmNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMNOTFOUND",
+		Message:        "Could not kill running container, cannot remove - %v",
+		Description:    "An attempt to delete a container was made but the container could not be found",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmState is generated when we try to delete a container
+	// but couldn't set its state to RemovalInProgress.
+	ErrorCodeRmState = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMSTATE",
+		Message:        "Failed to set container state to RemovalInProgress: %s",
+		Description:    "An attempt to delete a container was made, but there was an error trying to set its state to 'RemovalInProgress'",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmDriverFS is generated when we try to delete a container
+	// but the driver failed to delete its filesystem.
+	ErrorCodeRmDriverFS = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMDRIVERFS",
+		Message:        "Driver %s failed to remove root filesystem %s: %s",
+		Description:    "While trying to delete a container, the driver failed to remove the root filesystem",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmInit is generated when we try to delete a container
+	// but failed deleting its init filesystem.
+	ErrorCodeRmInit = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMINIT",
+		Message:        "Driver %s failed to remove init filesystem %s: %s",
+		Description:    "While trying to delete a container, the driver failed to remove the init filesystem",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmFS is generated when we try to delete a container
+	// but failed deleting its filesystem.
+	ErrorCodeRmFS = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMFS",
+		Message:        "Unable to remove filesystem for %v: %v",
+		Description:    "While trying to delete a container, the driver failed to remove the filesystem",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmExecDriver is generated when we try to delete a container
+	// but failed deleting its exec driver data.
+	ErrorCodeRmExecDriver = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMEXECDRIVER",
+		Message:        "Unable to remove execdriver data for %s: %s",
+		Description:    "While trying to delete a container, there was an error trying to remove the exec driver data",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeRmVolumeInUse is generated when we try to delete a container
+	// but failed deleting a volume because it's being used.
+	ErrorCodeRmVolumeInUse = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMVOLUMEINUSE",
+		Message:        "Conflict: %v",
+		Description:    "While trying to delete a container, one of its volumes is still being used",
+		HTTPStatusCode: http.StatusConflict,
+	})
+
+	// ErrorCodeRmVolume is generated when we try to delete a container
+	// but failed deleting a volume.
+	ErrorCodeRmVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "RMVOLUME",
+		Message:        "Error while removing volume %s: %v",
+		Description:    "While trying to delete a container, there was an error trying to delete one of its volumes",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeInvalidCpusetCpus is generated when user-provided cpuset CPUs
+	// are invalid.
+	ErrorCodeInvalidCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "INVALIDCPUSETCPUS",
+		Message:        "Invalid value %s for cpuset cpus.",
+		Description:    "While verifying the container's 'HostConfig', the CpusetCpus value was in an incorrect format",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeInvalidCpusetMems is generated when user-provided cpuset mems
+	// are invalid.
+	ErrorCodeInvalidCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "INVALIDCPUSETMEMS",
+		Message:        "Invalid value %s for cpuset mems.",
+		Description:    "While verifying the container's 'HostConfig', the CpusetMems value was in an incorrect format",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNotAvailableCpusetCpus is generated when user-provided cpuset
+	// CPUs aren't available in the container's cgroup.
+	ErrorCodeNotAvailableCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOTAVAILABLECPUSETCPUS",
+		Message:        "Requested CPUs are not available - requested %s, available: %s.",
+		Description:    "While verifying the container's 'HostConfig', the cpuset CPUs provided aren't available in the container's cgroup available set",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeNotAvailableCpusetMems is generated when user-provided cpuset
+	// memory nodes aren't available in the container's cgroup.
+	ErrorCodeNotAvailableCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NOTAVAILABLECPUSETMEMS",
+		Message:        "Requested memory nodes are not available - requested %s, available: %s.",
+		Description:    "While verifying the container's 'HostConfig', the cpuset memory nodes provided aren't available in the container's cgroup available set",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorVolumeNameTaken is generated when an attempt is made to
+	// create a volume with a name that is already taken by a different driver.
+	ErrorVolumeNameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "VOLUME_NAME_TAKEN",
+		Message:        "A volume name %s already exists with the %s driver. Choose a different volume name.",
+		Description:    "An attempt was made to create a volume, but the volume name is already taken by a different driver",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeCmdNotFound is generated when the container cmd can't start:
+	// the container command was not found (exit code 127).
+	ErrorCodeCmdNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CMDNOTFOUND",
+		Message:        "Container command not found or does not exist.",
+		Description:    "Command could not be found, command does not exist",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeCmdCouldNotBeInvoked is generated when the container cmd can't start:
+	// permission for the container command was denied (exit code 126).
+	ErrorCodeCmdCouldNotBeInvoked = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CMDCOULDNOTBEINVOKED",
+		Message:        "Container command could not be invoked.",
+		Description:    "Permission denied, cannot invoke command",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeCantStart is generated when the container cmd can't start
+	// for any reason other than the two errors above.
+	ErrorCodeCantStart = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "CANTSTART",
+		Message:        "Cannot start container %s: %s",
+		Description:    "There was an error while trying to start a container",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+)
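All of the codes above go through the same errcode machinery vendored from docker/distribution: errcode.Register binds a descriptor to the "engine" group at init time, and WithArgs fills in the descriptor's Message format string at the call site. The following is a minimal sketch (not part of this patch) of how a registered code is raised and how the HTTP layer can recover its status, assuming the errcode API as vendored here; the lookupExec helper and the exec ID are made up.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
	derr "github.com/docker/docker/errors"
)

// lookupExec is a hypothetical call site: WithArgs fills the descriptor's
// Message format string ("No such exec instance '%s' found in daemon").
func lookupExec(id string) error {
	return derr.ErrorCodeNoExecID.WithArgs(id)
}

func main() {
	err := lookupExec("deadbeef")
	// The API layer can recover the registered HTTP status from the error.
	if e, ok := err.(errcode.Error); ok {
		desc := e.ErrorCode().Descriptor()
		fmt.Println(desc.HTTPStatusCode == http.StatusNotFound) // true
		fmt.Println(err.Error())
	}
}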
diff --git a/vendor/github.com/docker/docker/errors/error.go b/vendor/github.com/docker/docker/errors/error.go
new file mode 100644
index 00000000..37222d44
--- /dev/null
+++ b/vendor/github.com/docker/docker/errors/error.go
@@ -0,0 +1,6 @@
+package errors
+
+// This file contains all of the errors that can be generated from the
+// docker engine but are not tied to any specific top-level component.
+
+const errGroup = "engine"
diff --git a/vendor/github.com/docker/docker/errors/image.go b/vendor/github.com/docker/docker/errors/image.go
new file mode 100644
index 00000000..04efe6fc
--- /dev/null
+++ b/vendor/github.com/docker/docker/errors/image.go
@@ -0,0 +1,20 @@
+package errors
+
+// This file contains all of the errors that can be generated from the
+// docker/image component.
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+var (
+	// ErrorCodeInvalidImageID is generated when the image ID specified is incorrectly formatted.
+	ErrorCodeInvalidImageID = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "INVALIDIMAGEID",
+		Message:        "image ID '%s' is invalid ",
+		Description:    "The specified image ID is incorrectly formatted",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+)
diff --git a/vendor/github.com/docker/docker/errors/server.go b/vendor/github.com/docker/docker/errors/server.go
new file mode 100644
index 00000000..1a7af00a
--- /dev/null
+++ b/vendor/github.com/docker/docker/errors/server.go
@@ -0,0 +1,36 @@
+package errors
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+var (
+	// ErrorCodeNewerClientVersion is generated when a request from a client
+	// specifies a higher version than the server supports.
+	ErrorCodeNewerClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NEWERCLIENTVERSION",
+		Message:        "client is newer than server (client API version: %s, server API version: %s)",
+		Description:    "The client version is higher than the server version",
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeOldClientVersion is generated when a request from a client
+	// specifies a version lower than the minimum version supported by the server.
+	ErrorCodeOldClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "OLDCLIENTVERSION",
+		Message:        "client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version",
+		Description:    "The client version is too old for the server",
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorNetworkControllerNotEnabled is generated when the networking stack is not enabled
+	// on certain platforms, such as Windows.
+	ErrorNetworkControllerNotEnabled = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "NETWORK_CONTROLLER_NOT_ENABLED",
+		Message:        "the network controller is not enabled for this platform",
+		Description:    "Docker's networking stack is disabled for this platform",
+		HTTPStatusCode: http.StatusNotFound,
+	})
+)
diff --git a/vendor/github.com/docker/docker/graph/export.go b/vendor/github.com/docker/docker/graph/export.go
index a57c81a8..3a47f318 100644
--- a/vendor/github.com/docker/docker/graph/export.go
+++ b/vendor/github.com/docker/docker/graph/export.go
@@ -2,32 +2,26 @@ package graph
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"time"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/registry"
 )
 
-// ImageExportConfig holds list of names to be exported to a output stream.
-// All images with the given tag and all versions
-// containing the same tag are exported. The resulting output is an
-// uncompressed tar ball.
-type ImageExportConfig struct {
-	// Names is the set of tags to export.
-	Names []string
-	// OutStream is the writer where the images are written to.
-	Outstream io.Writer
-}
-
-// ImageExport exports list of images to a output stream specified in the config.
-// The exported images are archived into a tar when written to the output stream.
-func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error {
-
+// ImageExport exports a list of images to an output stream specified in the
+// config.
The exported images are archived into a tar when written to the +// output stream. All images with the given tag and all versions containing the +// same tag are exported. names is the set of tags to export, and outStream +// is the writer which the images are written to. +func (s *TagStore) ImageExport(names []string, outStream io.Writer) error { // get image json tempdir, err := ioutil.TempDir("", "docker-export-") if err != nil { @@ -35,16 +29,16 @@ func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error { } defer os.RemoveAll(tempdir) - rootRepoMap := map[string]Repository{} + rootRepoMap := map[string]repository{} addKey := func(name string, tag string, id string) { logrus.Debugf("add key [%s:%s]", name, tag) if repo, ok := rootRepoMap[name]; !ok { - rootRepoMap[name] = Repository{tag: id} + rootRepoMap[name] = repository{tag: id} } else { repo[tag] = id } } - for _, name := range imageExportConfig.Names { + for _, name := range names { name = registry.NormalizeLocalName(name) logrus.Debugf("Serializing %s", name) rootRepo := s.Repositories[name] @@ -66,6 +60,11 @@ func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error { // This is a named image like 'busybox:latest' repoName, repoTag := parsers.ParseRepositoryTag(name) + // Skip digests on save + if _, err := digest.ParseDigest(repoTag); err == nil { + repoTag = "" + } + // check this length, because a lookup of a truncated has will not have a tag // and will not need to be added to this map if len(repoTag) > 0 { @@ -97,6 +96,9 @@ func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error { if err := f.Close(); err != nil { return err } + if err := os.Chtimes(filepath.Join(tempdir, "repositories"), time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } } else { logrus.Debugf("There were no repositories to write") } @@ -107,16 +109,20 @@ func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error { } defer fs.Close() - if _, err := io.Copy(imageExportConfig.Outstream, fs); err != nil { + if _, err := io.Copy(outStream, fs); err != nil { return err } logrus.Debugf("End export image") return nil } -// FIXME: this should be a top-level function, not a class method func (s *TagStore) exportImage(name, tempdir string) error { for n := name; n != ""; { + img, err := s.LookupImage(n) + if err != nil || img == nil { + return fmt.Errorf("No such image %s", n) + } + // temporary directory tmpImageDir := filepath.Join(tempdir, n) if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil { @@ -133,15 +139,17 @@ func (s *TagStore) exportImage(name, tempdir string) error { return err } + imageInspectRaw, err := json.Marshal(img) + if err != nil { + return err + } + // serialize json json, err := os.Create(filepath.Join(tmpImageDir, "json")) if err != nil { return err } - imageInspectRaw, err := s.lookupRaw(n) - if err != nil { - return err - } + written, err := json.Write(imageInspectRaw) if err != nil { return err @@ -155,15 +163,17 @@ func (s *TagStore) exportImage(name, tempdir string) error { if err != nil { return err } - if err := s.ImageTarLayer(n, fsTar); err != nil { + if err := s.imageTarLayer(n, fsTar); err != nil { return err } - // find parent - img, err := s.LookupImage(n) - if err != nil { - return err + for _, fname := range []string{"", "VERSION", "json", "layer.tar"} { + if err := os.Chtimes(filepath.Join(tmpImageDir, fname), img.Created, img.Created); err != nil { + return err + } } + + // try again with parent n = img.Parent } return nil diff 
--git a/vendor/github.com/docker/docker/graph/graph.go b/vendor/github.com/docker/docker/graph/graph.go index 4a6079c8..fed2ee02 100644 --- a/vendor/github.com/docker/docker/graph/graph.go +++ b/vendor/github.com/docker/docker/graph/graph.go @@ -2,7 +2,6 @@ package graph import ( "compress/gzip" - "crypto/sha256" "encoding/json" "errors" "fmt" @@ -18,20 +17,41 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" - "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) +// v1Descriptor is a non-content-addressable image descriptor +type v1Descriptor struct { + img *image.Image +} + +// ID returns the image ID specified in the image structure. +func (img v1Descriptor) ID() string { + return img.img.ID +} + +// Parent returns the parent ID specified in the image structure. +func (img v1Descriptor) Parent() string { + return img.img.Parent +} + +// MarshalConfig renders the image structure into JSON. +func (img v1Descriptor) MarshalConfig() ([]byte, error) { + return json.Marshal(img.img) +} + // The type is used to protect pulling or building related image // layers from deleteing when filtered by dangling=true // The key of layers is the images ID which is pulling or building @@ -77,46 +97,70 @@ func (r *retainedLayers) Exists(layerID string) bool { // A Graph is a store for versioned filesystem images and the relationship between them. type Graph struct { - root string - idIndex *truncindex.TruncIndex - driver graphdriver.Driver - imageMutex imageMutex // protect images in driver. - retained *retainedLayers + root string + idIndex *truncindex.TruncIndex + driver graphdriver.Driver + imagesMutex sync.Mutex + imageMutex locker.Locker // protect images in driver. + retained *retainedLayers + tarSplitDisabled bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + + // access to parentRefs must be protected with imageMutex locking the image id + // on the key of the map (e.g. imageMutex.Lock(img.ID), parentRefs[img.ID]...) + parentRefs map[string]int } // file names for ./graph// const ( - jsonFileName = "json" - layersizeFileName = "layersize" - digestFileName = "checksum" - tarDataFileName = "tar-data.json.gz" + jsonFileName = "json" + layersizeFileName = "layersize" + digestFileName = "checksum" + tarDataFileName = "tar-data.json.gz" + v1CompatibilityFileName = "v1Compatibility" + parentFileName = "parent" ) var ( - // ErrDigestNotSet is used when request the digest for a layer + // errDigestNotSet is used when request the digest for a layer // but the layer has no digest value or content to compute the // the digest. - ErrDigestNotSet = errors.New("digest is not set for layer") + errDigestNotSet = errors.New("digest is not set for layer") ) // NewGraph instantiates a new graph at the given root path in the filesystem. // `root` will be created if it doesn't exist. 
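Register now takes an image.Descriptor rather than a concrete *image.Image, with v1Descriptor (above) as the non-content-addressable implementation. The interface itself lives in docker/docker/image and is not part of this hunk; inferring from the calls this file makes (im.ID(), im.Parent(), im.MarshalConfig()), it presumably looks like the sketch below, before the NewGraph changes continue.

// A sketch of the contract the graph now programs against; the real
// definition is in docker/docker/image and is inferred here, not copied
// from the patch.
package image

// Descriptor describes an image well enough for the graph to register it:
// an ID, a parent ID (empty for base layers), and a way to render the
// config JSON that is written to the image's "json" file.
type Descriptor interface {
	ID() string
	Parent() string
	MarshalConfig() ([]byte, error)
}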
-func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) { +func NewGraph(root string, driver graphdriver.Driver, uidMaps, gidMaps []idtools.IDMap) (*Graph, error) { abspath, err := filepath.Abs(root) if err != nil { return nil, err } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } // Create the root directory if it doesn't exists - if err := system.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err } graph := &Graph{ - root: abspath, - idIndex: truncindex.NewTruncIndex([]string{}), - driver: driver, - retained: &retainedLayers{layerHolders: make(map[string]map[string]struct{})}, + root: abspath, + idIndex: truncindex.NewTruncIndex([]string{}), + driver: driver, + retained: &retainedLayers{layerHolders: make(map[string]map[string]struct{})}, + uidMaps: uidMaps, + gidMaps: gidMaps, + parentRefs: make(map[string]int), } + + // Windows does not currently support tarsplit functionality. + if runtime.GOOS == "windows" { + graph.tarSplitDisabled = true + } + if err := graph.restore(); err != nil { return nil, err } @@ -137,25 +181,28 @@ func (graph *Graph) restore() error { for _, v := range dir { id := v.Name() if graph.driver.Exists(id) { + img, err := graph.loadImage(id) + if err != nil { + logrus.Warnf("ignoring image %s, it could not be restored: %v", id, err) + continue + } + graph.imageMutex.Lock(img.Parent) + graph.parentRefs[img.Parent]++ + graph.imageMutex.Unlock(img.Parent) ids = append(ids, id) } } - baseIds, err := graph.restoreBaseImages() - if err != nil { - return err - } - ids = append(ids, baseIds...) - graph.idIndex = truncindex.NewTruncIndex(ids) logrus.Debugf("Restored %d elements", len(ids)) return nil } -// IsNotExist detects whether an image exists by parsing the incoming error message. -// FIXME: Implement error subclass instead of looking at the error text -// Note: This is the way golang implements os.IsNotExists on Plan9 +// IsNotExist detects whether an image exists by parsing the incoming error +// message. func (graph *Graph) IsNotExist(err error, id string) bool { + // FIXME: Implement error subclass instead of looking at the error text + // Note: This is the way golang implements os.IsNotExists on Plan9 return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such")) && strings.Contains(err.Error(), id) } @@ -172,7 +219,10 @@ func (graph *Graph) Exists(id string) bool { func (graph *Graph) Get(name string) (*image.Image, error) { id, err := graph.idIndex.Get(name) if err != nil { - return nil, fmt.Errorf("could not find image: %v", err) + if err == truncindex.ErrNotExist { + return nil, fmt.Errorf("image %s does not exist", name) + } + return nil, err } img, err := graph.loadImage(id) if err != nil { @@ -189,7 +239,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) { } img.Size = size - if err := graph.saveSize(graph.imageRoot(id), int(img.Size)); err != nil { + if err := graph.saveSize(graph.imageRoot(id), img.Size); err != nil { return nil, err } } @@ -197,12 +247,12 @@ func (graph *Graph) Get(name string) (*image.Image, error) { } // Create creates a new image and registers it in the graph. 
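NewGraph above swaps system.MkdirAll for the idtools helpers so that graph directories end up owned by whatever host UID/GID container-root maps to under user namespaces; mktemp later in this file does the same. A short sketch of that call pair, assuming the idtools API used in this hunk; the mapping values and the path are illustrative only.

package main

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Hypothetical remapping: container root (0) maps to host ID 100000.
	uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// Resolve which host uid/gid owns "root" under this mapping.
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		panic(err)
	}

	// Create the directory chain owned by the remapped root, mirroring
	// what NewGraph and mktemp now do for the graph root and _tmp dirs.
	dir := filepath.Join(os.TempDir(), "graph-example", "_tmp")
	if err := idtools.MkdirAllAs(dir, 0700, rootUID, rootGID); err != nil {
		panic(err)
	}
}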
-func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { +func (graph *Graph) Create(layerData io.Reader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { img := &image.Image{ ID: stringid.GenerateRandomID(), Comment: comment, Created: time.Now().UTC(), - DockerVersion: dockerversion.VERSION, + DockerVersion: dockerversion.Version, Author: author, Config: config, Architecture: runtime.GOARCH, @@ -215,23 +265,40 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, contain img.ContainerConfig = *containerConfig } - if err := graph.Register(img, layerData); err != nil { + if err := graph.Register(v1Descriptor{img}, layerData); err != nil { return nil, err } return img, nil } // Register imports a pre-existing image into the graph. -func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) (err error) { +// Returns nil if the image is already registered. +func (graph *Graph) Register(im image.Descriptor, layerData io.Reader) (err error) { + imgID := im.ID() - if err := image.ValidateID(img.ID); err != nil { + if err := image.ValidateID(imgID); err != nil { return err } + // this is needed cause pull_v2 attemptIDReuse could deadlock + graph.imagesMutex.Lock() + defer graph.imagesMutex.Unlock() + // We need this entire operation to be atomic within the engine. Note that // this doesn't mean Register is fully safe yet. - graph.imageMutex.Lock(img.ID) - defer graph.imageMutex.Unlock(img.ID) + graph.imageMutex.Lock(imgID) + defer graph.imageMutex.Unlock(imgID) + + return graph.register(im, layerData) +} + +func (graph *Graph) register(im image.Descriptor, layerData io.Reader) (err error) { + imgID := im.ID() + + // Skip register if image is already registered + if graph.Exists(imgID) { + return nil + } // The returned `error` must be named in this function's signature so that // `err` is not shadowed in this deferred cleanup. @@ -239,19 +306,14 @@ func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) // If any error occurs, remove the new dir from the driver. // Don't check for errors since the dir might not have been created. if err != nil { - graph.driver.Remove(img.ID) + graph.driver.Remove(imgID) } }() - // (This is a convenience to save time. Race conditions are taken care of by os.Rename) - if graph.Exists(img.ID) { - return fmt.Errorf("Image %s already exists", img.ID) - } - // Ensure that the image root does not exist on the filesystem // when it is not registered in the graph. // This is common when you switch from one graph driver to another - if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(graph.imageRoot(imgID)); err != nil && !os.IsNotExist(err) { return err } @@ -259,44 +321,64 @@ func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) // (the graph is the source of truth). // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. // (FIXME: make that mandatory for drivers). 
- graph.driver.Remove(img.ID) + graph.driver.Remove(imgID) - tmp, err := graph.mktemp("") - defer os.RemoveAll(tmp) + tmp, err := graph.mktemp() if err != nil { - return fmt.Errorf("mktemp failed: %s", err) + return err } + defer os.RemoveAll(tmp) + + parent := im.Parent() // Create root filesystem in the driver - if err := createRootFilesystemInDriver(graph, img, layerData); err != nil { + if err := createRootFilesystemInDriver(graph, imgID, parent); err != nil { return err } // Apply the diff/layer - if err := graph.storeImage(img, layerData, tmp); err != nil { + config, err := im.MarshalConfig() + if err != nil { + return err + } + if err := graph.storeImage(imgID, parent, config, layerData, tmp); err != nil { return err } // Commit - if err := os.Rename(tmp, graph.imageRoot(img.ID)); err != nil { + if err := os.Rename(tmp, graph.imageRoot(imgID)); err != nil { return err } - graph.idIndex.Add(img.ID) + + graph.idIndex.Add(imgID) + + graph.imageMutex.Lock(parent) + graph.parentRefs[parent]++ + graph.imageMutex.Unlock(parent) + + return nil +} + +func createRootFilesystemInDriver(graph *Graph, id, parent string) error { + if err := graph.driver.Create(id, parent); err != nil { + return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, id, err) + } return nil } // TempLayerArchive creates a temporary archive of the given image's filesystem layer. // The archive is stored on disk and will be automatically deleted as soon as has been read. // If output is not nil, a human-readable progress bar will be written to it. -func (graph *Graph) TempLayerArchive(id string, sf *streamformatter.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { +func (graph *Graph) tempLayerArchive(id string, sf *streamformatter.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { image, err := graph.Get(id) if err != nil { return nil, err } - tmp, err := graph.mktemp("") + tmp, err := graph.mktemp() if err != nil { return nil, err } - a, err := graph.TarLayer(image) + defer os.RemoveAll(tmp) + a, err := graph.tarLayer(image) if err != nil { return nil, err } @@ -314,62 +396,49 @@ func (graph *Graph) TempLayerArchive(id string, sf *streamformatter.StreamFormat } // mktemp creates a temporary sub-directory inside the graph's filesystem. -func (graph *Graph) mktemp(id string) (string, error) { +func (graph *Graph) mktemp() (string, error) { dir := filepath.Join(graph.root, "_tmp", stringid.GenerateNonCryptoID()) - if err := system.MkdirAll(dir, 0700); err != nil { + rootUID, rootGID, err := idtools.GetRootUIDGID(graph.uidMaps, graph.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dir, 0700, rootUID, rootGID); err != nil { return "", err } return dir, nil } -func (graph *Graph) newTempFile() (*os.File, error) { - tmp, err := graph.mktemp("") - if err != nil { - return nil, err - } - return ioutil.TempFile(tmp, "") -} - -func bufferToFile(f *os.File, src io.Reader) (int64, digest.Digest, error) { - var ( - h = sha256.New() - w = gzip.NewWriter(io.MultiWriter(f, h)) - ) - _, err := io.Copy(w, src) - w.Close() - if err != nil { - return 0, "", err - } - n, err := f.Seek(0, os.SEEK_CUR) - if err != nil { - return 0, "", err - } - if _, err := f.Seek(0, 0); err != nil { - return 0, "", err - } - return n, digest.NewDigest("sha256", h), nil -} - // Delete atomically removes an image from the graph. 
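Delete, which follows, decrements the same parentRefs counter that restore() and register() increment, always under imageMutex, which is now a pkg/locker Locker keyed by image ID rather than the hand-rolled mutex map this patch removes further down. A reduced sketch of the pattern, assuming the vendored pkg/locker API (Lock/Unlock take the key; the zero value allocates its map lazily):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/locker"
)

// refCounter models the graph's bookkeeping: refs counts how many images
// name a given parent, and the per-key locker serializes updates for one
// image ID without a single global lock.
type refCounter struct {
	mu   locker.Locker
	refs map[string]int
}

func (c *refCounter) retain(parent string) {
	c.mu.Lock(parent)
	c.refs[parent]++
	c.mu.Unlock(parent)
}

func (c *refCounter) release(parent string) {
	c.mu.Lock(parent)
	c.refs[parent]--
	if c.refs[parent] == 0 {
		delete(c.refs, parent)
	}
	c.mu.Unlock(parent)
}

func main() {
	c := &refCounter{refs: make(map[string]int)}
	c.retain("base-image-id")
	c.retain("base-image-id")
	c.release("base-image-id")
	fmt.Println(c.refs["base-image-id"]) // 1
}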
func (graph *Graph) Delete(name string) error { id, err := graph.idIndex.Get(name) if err != nil { return err } - tmp, err := graph.mktemp("") + img, err := graph.Get(id) + if err != nil { + return err + } graph.idIndex.Delete(id) - if err == nil { + tmp, err := graph.mktemp() + if err != nil { + tmp = graph.imageRoot(id) + } else { if err := os.Rename(graph.imageRoot(id), tmp); err != nil { // On err make tmp point to old dir and cleanup unused tmp dir os.RemoveAll(tmp) tmp = graph.imageRoot(id) } - } else { - // On err make tmp point to old dir for cleanup - tmp = graph.imageRoot(id) } // Remove rootfs data from the driver graph.driver.Remove(id) + + graph.imageMutex.Lock(img.Parent) + graph.parentRefs[img.Parent]-- + if graph.parentRefs[img.Parent] == 0 { + delete(graph.parentRefs, img.Parent) + } + graph.imageMutex.Unlock(img.Parent) + // Remove the trashed image directory return os.RemoveAll(tmp) } @@ -387,16 +456,18 @@ func (graph *Graph) Map() map[string]*image.Image { // The walking order is undetermined. func (graph *Graph) walkAll(handler func(*image.Image)) { graph.idIndex.Iterate(func(id string) { - if img, err := graph.Get(id); err != nil { + img, err := graph.Get(id) + if err != nil { return - } else if handler != nil { + } + if handler != nil { handler(img) } }) } // ByParent returns a lookup table of images by their parent. -// If an image of id ID has 3 children images, then the value for key ID +// If an image of key ID has 3 children images, then the value for key ID // will be a list of 3 images. // If an image has no children, it will not have an entry in the table. func (graph *Graph) ByParent() map[string][]*image.Image { @@ -415,32 +486,48 @@ func (graph *Graph) ByParent() map[string][]*image.Image { return byParent } -// Retain keeps the images and layers that are in pulling chain so that they are not deleted. -// If not, they may be deleted by rmi with dangling condition. +// HasChildren returns whether the given image has any child images. +func (graph *Graph) HasChildren(imgID string) bool { + graph.imageMutex.Lock(imgID) + count := graph.parentRefs[imgID] + graph.imageMutex.Unlock(imgID) + return count > 0 +} + +// Retain keeps the images and layers that are in the pulling chain so that +// they are not deleted. If not retained, they may be deleted by rmi. func (graph *Graph) Retain(sessionID string, layerIDs ...string) { graph.retained.Add(sessionID, layerIDs) } -// Release removes the referenced image id from the provided set of layers. +// Release removes the referenced image ID from the provided set of layers. func (graph *Graph) Release(sessionID string, layerIDs ...string) { graph.retained.Delete(sessionID, layerIDs) } -// Heads returns all heads in the graph, keyed by id. +// heads returns all heads in the graph, keyed by id. // A head is an image which is not the parent of another image in the graph. -func (graph *Graph) Heads() map[string]*image.Image { +func (graph *Graph) heads() map[string]*image.Image { heads := make(map[string]*image.Image) - byParent := graph.ByParent() graph.walkAll(func(image *image.Image) { - // If it's not in the byParent lookup table, then - // it's not a parent -> so it's a head! - if _, exists := byParent[image.ID]; !exists { + // if it has no children, then it's not a parent, so it's an head + if !graph.HasChildren(image.ID) { heads[image.ID] = image } }) return heads } +// tarLayer returns a tar archive of the image's filesystem layer. 
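The tarLayer implementation that follows first asks assembleTarLayer to rebuild the layer from recorded tar-split metadata and only falls back to graph.driver.Diff when that fails. What the reassembly amounts to is sketched below against the vendored vbatts/tar-split API (paths are illustrative): headers and ordering are replayed from tar-data.json.gz while file payloads are read back from the extracted rootfs, reproducing a byte-identical layer tar.

package main

import (
	"compress/gzip"
	"io"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// assemble rebuilds a layer tar from its tar-split metadata and rootfs.
func assemble(metadataPath, rootfsPath string, out io.Writer) error {
	mf, err := os.Open(metadataPath) // e.g. .../tar-data.json.gz
	if err != nil {
		return err
	}
	defer mf.Close()

	mfz, err := gzip.NewReader(mf)
	if err != nil {
		return err
	}
	defer mfz.Close()

	// File payloads come from the on-disk rootfs; headers and ordering
	// come from the recorded metadata.
	fileGetter := storage.NewPathFileGetter(rootfsPath)
	metaUnpacker := storage.NewJSONUnpacker(mfz)

	rdr := asm.NewOutputTarStream(fileGetter, metaUnpacker)
	defer rdr.Close()

	_, err = io.Copy(out, rdr)
	return err
}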
+func (graph *Graph) tarLayer(img *image.Image) (arch io.ReadCloser, err error) { + rdr, err := graph.assembleTarLayer(img) + if err != nil { + logrus.Debugf("[graph] tarLayer with traditional differ: %s", img.ID) + return graph.driver.Diff(img.ID, img.Parent) + } + return rdr, nil +} + func (graph *Graph) imageRoot(id string) string { return filepath.Join(graph.root, id) } @@ -463,6 +550,21 @@ func (graph *Graph) loadImage(id string) (*image.Image, error) { if err := dec.Decode(img); err != nil { return nil, err } + + if img.ID == "" { + img.ID = id + } + + if img.Parent == "" && img.ParentID != "" && img.ParentID.Validate() == nil { + img.Parent = img.ParentID.Hex() + } + + // compatibilityID for parent + parent, err := ioutil.ReadFile(filepath.Join(root, parentFileName)) + if err == nil && len(parent) > 0 { + img.Parent = string(parent) + } + if err := image.ValidateID(img.ID); err != nil { return nil, err } @@ -489,15 +591,21 @@ func (graph *Graph) loadImage(id string) (*image.Image, error) { } // saveSize stores the `size` in the provided graph `img` directory `root`. -func (graph *Graph) saveSize(root string, size int) error { - if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.Itoa(size)), 0600); err != nil { +func (graph *Graph) saveSize(root string, size int64) error { + if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.FormatInt(size, 10)), 0600); err != nil { return fmt.Errorf("Error storing image size in %s/%s: %s", root, layersizeFileName, err) } return nil } -// SetDigest sets the digest for the image layer to the provided value. -func (graph *Graph) SetDigest(id string, dgst digest.Digest) error { +// setLayerDigestWithLock sets the digest for the image layer to the provided value. +func (graph *Graph) setLayerDigestWithLock(id string, dgst digest.Digest) error { + graph.imageMutex.Lock(id) + defer graph.imageMutex.Unlock(id) + + return graph.setLayerDigest(id, dgst) +} +func (graph *Graph) setLayerDigest(id string, dgst digest.Digest) error { root := graph.imageRoot(id) if err := ioutil.WriteFile(filepath.Join(root, digestFileName), []byte(dgst.String()), 0600); err != nil { return fmt.Errorf("Error storing digest in %s/%s: %s", root, digestFileName, err) @@ -505,66 +613,161 @@ func (graph *Graph) SetDigest(id string, dgst digest.Digest) error { return nil } -// GetDigest gets the digest for the provide image layer id. -func (graph *Graph) GetDigest(id string) (digest.Digest, error) { +// getLayerDigestWithLock gets the digest for the provide image layer id. +func (graph *Graph) getLayerDigestWithLock(id string) (digest.Digest, error) { + graph.imageMutex.Lock(id) + defer graph.imageMutex.Unlock(id) + + return graph.getLayerDigest(id) +} + +func (graph *Graph) getLayerDigest(id string) (digest.Digest, error) { root := graph.imageRoot(id) cs, err := ioutil.ReadFile(filepath.Join(root, digestFileName)) if err != nil { if os.IsNotExist(err) { - return "", ErrDigestNotSet + return "", errDigestNotSet } return "", err } return digest.ParseDigest(string(cs)) } -// RawJSON returns the JSON representation for an image as a byte array. 
-func (graph *Graph) RawJSON(id string) ([]byte, error) { +// setV1CompatibilityConfig stores the v1Compatibility JSON data associated +// with the image in the manifest to the disk +func (graph *Graph) setV1CompatibilityConfig(id string, data []byte) error { root := graph.imageRoot(id) + return ioutil.WriteFile(filepath.Join(root, v1CompatibilityFileName), data, 0600) +} - buf, err := ioutil.ReadFile(jsonPath(root)) - if err != nil { - return nil, fmt.Errorf("Failed to read json for image %s: %s", id, err) +// getV1CompatibilityConfig reads the v1Compatibility JSON data for the image +// from the disk +func (graph *Graph) getV1CompatibilityConfig(id string) ([]byte, error) { + root := graph.imageRoot(id) + return ioutil.ReadFile(filepath.Join(root, v1CompatibilityFileName)) +} + +// generateV1CompatibilityChain makes sure v1Compatibility JSON data exists +// for the image. If it doesn't it generates and stores it for the image and +// all of it's parents based on the image config JSON. +func (graph *Graph) generateV1CompatibilityChain(id string) ([]byte, error) { + graph.imageMutex.Lock(id) + defer graph.imageMutex.Unlock(id) + + if v1config, err := graph.getV1CompatibilityConfig(id); err == nil { + return v1config, nil } - return buf, nil + // generate new, store it to disk + img, err := graph.Get(id) + if err != nil { + return nil, err + } + + digestPrefix := string(digest.Canonical) + ":" + img.ID = strings.TrimPrefix(img.ID, digestPrefix) + + if img.Parent != "" { + parentConfig, err := graph.generateV1CompatibilityChain(img.Parent) + if err != nil { + return nil, err + } + var parent struct{ ID string } + err = json.Unmarshal(parentConfig, &parent) + if err != nil { + return nil, err + } + img.Parent = parent.ID + } + + json, err := json.Marshal(img) + if err != nil { + return nil, err + } + if err := graph.setV1CompatibilityConfig(id, json); err != nil { + return nil, err + } + return json, nil } func jsonPath(root string) string { return filepath.Join(root, jsonFileName) } -func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData archive.ArchiveReader, root string) error { - // this is saving the tar-split metadata - mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { +// storeImage stores file system layer data for the given image to the +// graph's storage driver. Image metadata is stored in a file +// at the specified root directory. +func (graph *Graph) storeImage(id, parent string, config []byte, layerData io.Reader, root string) (err error) { + var size int64 + // Store the layer. 
If layerData is not nil, unpack it into the new layer + if layerData != nil { + if size, err = graph.disassembleAndApplyTarLayer(id, parent, layerData, root); err != nil { + return err + } + } + + if err := graph.saveSize(root, size); err != nil { return err } - mfz := gzip.NewWriter(mf) - metaPacker := storage.NewJSONPacker(mfz) - defer mf.Close() - defer mfz.Close() - inflatedLayerData, err := archive.DecompressStream(layerData) + if err := ioutil.WriteFile(jsonPath(root), config, 0600); err != nil { + return err + } + + // If image is pointing to a parent via CompatibilityID write the reference to disk + img, err := image.NewImgJSON(config) if err != nil { return err } - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil) - if err != nil { - return err + if img.ParentID.Validate() == nil && parent != img.ParentID.Hex() { + if err := ioutil.WriteFile(filepath.Join(root, parentFileName), []byte(parent), 0600); err != nil { + return err + } } - - if img.Size, err = graph.driver.ApplyDiff(img.ID, img.Parent, archive.ArchiveReader(rdr)); err != nil { - return err - } - return nil } -func (graph *Graph) assembleTarLayer(img *image.Image) (archive.Archive, error) { +func (graph *Graph) disassembleAndApplyTarLayer(id, parent string, layerData io.Reader, root string) (size int64, err error) { + var ar io.Reader + + if graph.tarSplitDisabled { + ar = layerData + } else { + // this is saving the tar-split metadata + mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) + if err != nil { + return 0, err + } + + mfz := gzip.NewWriter(mf) + metaPacker := storage.NewJSONPacker(mfz) + defer mf.Close() + defer mfz.Close() + + inflatedLayerData, err := archive.DecompressStream(layerData) + if err != nil { + return 0, err + } + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil) + if err != nil { + return 0, err + } + + ar = archive.Reader(rdr) + } + + if size, err = graph.driver.ApplyDiff(id, parent, ar); err != nil { + return 0, err + } + + return +} + +func (graph *Graph) assembleTarLayer(img *image.Image) (io.ReadCloser, error) { root := graph.imageRoot(img.ID) mFileName := filepath.Join(root, tarDataFileName) mf, err := os.Open(mFileName) diff --git a/vendor/github.com/docker/docker/graph/graph_test.go b/vendor/github.com/docker/docker/graph/graph_test.go index 63ef7357..3e0ba3c6 100644 --- a/vendor/github.com/docker/docker/graph/graph_test.go +++ b/vendor/github.com/docker/docker/graph/graph_test.go @@ -9,8 +9,8 @@ import ( "testing" "time" - "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" ) @@ -73,7 +73,7 @@ func TestInterruptedRegister(t *testing.T) { Created: time.Now(), } w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) - graph.Register(image, badArchive) + graph.Register(v1Descriptor{image}, badArchive) if _, err := graph.Get(image.ID); err == nil { t.Fatal("Image should not exist after Register is interrupted") } @@ -82,7 +82,7 @@ func TestInterruptedRegister(t *testing.T) { if err != nil { t.Fatal(err) } - if err := graph.Register(image, goodArchive); err != nil { + 
if err := graph.Register(v1Descriptor{image}, goodArchive); err != nil { t.Fatal(err) } } @@ -106,8 +106,8 @@ func TestGraphCreate(t *testing.T) { if img.Comment != "Testing" { t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment) } - if img.DockerVersion != dockerversion.VERSION { - t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion) + if img.DockerVersion != dockerversion.Version { + t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.Version, img.DockerVersion) } images := graph.Map() if l := len(images); l != 1 { @@ -130,7 +130,7 @@ func TestRegister(t *testing.T) { Comment: "testing", Created: time.Now(), } - err = graph.Register(image, archive) + err = graph.Register(v1Descriptor{image}, archive) if err != nil { t.Fatal(err) } @@ -212,7 +212,7 @@ func TestDelete(t *testing.T) { t.Fatal(err) } // Test delete twice (pull -> rm -> pull -> rm) - if err := graph.Register(img1, archive); err != nil { + if err := graph.Register(v1Descriptor{img1}, archive); err != nil { t.Fatal(err) } if err := graph.Delete(img1.ID); err != nil { @@ -246,9 +246,19 @@ func TestByParent(t *testing.T) { Created: time.Now(), Parent: parentImage.ID, } - _ = graph.Register(parentImage, archive1) - _ = graph.Register(childImage1, archive2) - _ = graph.Register(childImage2, archive3) + + err := graph.Register(v1Descriptor{parentImage}, archive1) + if err != nil { + t.Fatal(err) + } + err = graph.Register(v1Descriptor{childImage1}, archive2) + if err != nil { + t.Fatal(err) + } + err = graph.Register(v1Descriptor{childImage2}, archive3) + if err != nil { + t.Fatal(err) + } byParent := graph.ByParent() numChildren := len(byParent[parentImage.ID]) @@ -281,11 +291,11 @@ func tempGraph(t *testing.T) (*Graph, graphdriver.Driver) { if err != nil { t.Fatal(err) } - driver, err := graphdriver.New(tmp, nil) + driver, err := graphdriver.New(tmp, nil, nil, nil) if err != nil { t.Fatal(err) } - graph, err := NewGraph(tmp, driver) + graph, err := NewGraph(tmp, driver, nil, nil) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/graph/graph_unix.go b/vendor/github.com/docker/docker/graph/graph_unix.go deleted file mode 100644 index a07807d2..00000000 --- a/vendor/github.com/docker/docker/graph/graph_unix.go +++ /dev/null @@ -1,120 +0,0 @@ -// +build !windows - -package graph - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" -) - -// SetupInitLayer populates a directory with mountpoints suitable -// for bind-mounting dockerinit into the container. The mountpoint is simply an -// empty file at /.dockerinit -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. 
-func SetupInitLayer(initLayer string) error { - for pth, typ := range map[string]string{ - "/dev/pts": "dir", - "/dev/shm": "dir", - "/proc": "dir", - "/sys": "dir", - "/.dockerinit": "file", - "/.dockerenv": "file", - "/etc/resolv.conf": "file", - "/etc/hosts": "file", - "/etc/hostname": "file", - "/dev/console": "file", - "/etc/mtab": "/proc/mounts", - } { - parts := strings.Split(pth, "/") - prev := "/" - for _, p := range parts[1:] { - prev = filepath.Join(prev, p) - syscall.Unlink(filepath.Join(initLayer, prev)) - } - - if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { - if os.IsNotExist(err) { - if err := system.MkdirAll(filepath.Join(initLayer, filepath.Dir(pth)), 0755); err != nil { - return err - } - switch typ { - case "dir": - if err := system.MkdirAll(filepath.Join(initLayer, pth), 0755); err != nil { - return err - } - case "file": - f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - default: - if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { - return err - } - } - } else { - return err - } - } - } - - // Layer is ready to use, if it wasn't before. - return nil -} - -func createRootFilesystemInDriver(graph *Graph, img *image.Image, layerData archive.ArchiveReader) error { - if err := graph.driver.Create(img.ID, img.Parent); err != nil { - return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) - } - return nil -} - -func (graph *Graph) restoreBaseImages() ([]string, error) { - return nil, nil -} - -// storeImage stores file system layer data for the given image to the -// graph's storage driver. Image metadata is stored in a file -// at the specified root directory. -func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader, root string) (err error) { - // Store the layer. If layerData is not nil, unpack it into the new layer - if layerData != nil { - if err := graph.disassembleAndApplyTarLayer(img, layerData, root); err != nil { - return err - } - } - - if err := graph.saveSize(root, int(img.Size)); err != nil { - return err - } - - f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { - return err - } - - defer f.Close() - - return json.NewEncoder(f).Encode(img) -} - -// TarLayer returns a tar archive of the image's filesystem layer. -func (graph *Graph) TarLayer(img *image.Image) (arch archive.Archive, err error) { - rdr, err := graph.assembleTarLayer(img) - if err != nil { - logrus.Debugf("[graph] TarLayer with traditional differ: %s", img.ID) - return graph.driver.Diff(img.ID, img.Parent) - } - return rdr, nil -} diff --git a/vendor/github.com/docker/docker/graph/graph_windows.go b/vendor/github.com/docker/docker/graph/graph_windows.go deleted file mode 100644 index a19b356f..00000000 --- a/vendor/github.com/docker/docker/graph/graph_windows.go +++ /dev/null @@ -1,164 +0,0 @@ -// +build windows - -package graph - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver/windows" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" -) - -// SetupInitLayer populates a directory with mountpoints suitable -// for bind-mounting dockerinit into the container. 
T -func SetupInitLayer(initLayer string) error { - return nil -} - -func createRootFilesystemInDriver(graph *Graph, img *image.Image, layerData archive.ArchiveReader) error { - if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok { - if img.Container != "" && layerData == nil { - logrus.Debugf("Copying from container %s.", img.Container) - - var ids []string - if img.Parent != "" { - parentImg, err := graph.Get(img.Parent) - if err != nil { - return err - } - - ids, err = graph.ParentLayerIds(parentImg) - if err != nil { - return err - } - } - - if err := wd.CopyDiff(img.Container, img.ID, wd.LayerIdsToPaths(ids)); err != nil { - return fmt.Errorf("Driver %s failed to copy image rootfs %s: %s", graph.driver, img.Container, err) - } - } else if img.Parent == "" { - if err := graph.driver.Create(img.ID, img.Parent); err != nil { - return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) - } - } - } else { - // This fallback allows the use of VFS during daemon development. - if err := graph.driver.Create(img.ID, img.Parent); err != nil { - return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) - } - } - return nil -} - -func (graph *Graph) restoreBaseImages() ([]string, error) { - // TODO Windows. This needs implementing (@swernli) - return nil, nil -} - -// ParentLayerIds returns a list of all parent image IDs for the given image. -func (graph *Graph) ParentLayerIds(img *image.Image) (ids []string, err error) { - for i := img; i != nil && err == nil; i, err = graph.GetParent(i) { - ids = append(ids, i.ID) - } - - return -} - -// storeImage stores file system layer data for the given image to the -// graph's storage driver. Image metadata is stored in a file -// at the specified root directory. -func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader, root string) (err error) { - - if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok { - // Store the layer. If layerData is not nil and this isn't a base image, - // unpack it into the new layer - if layerData != nil && img.Parent != "" { - var ids []string - if img.Parent != "" { - parentImg, err := graph.Get(img.Parent) - if err != nil { - return err - } - - ids, err = graph.ParentLayerIds(parentImg) - if err != nil { - return err - } - } - - if img.Size, err = wd.Import(img.ID, layerData, wd.LayerIdsToPaths(ids)); err != nil { - return err - } - } - - if err := graph.saveSize(root, int(img.Size)); err != nil { - return err - } - - f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { - return err - } - - defer f.Close() - - return json.NewEncoder(f).Encode(img) - } - // We keep this functionality here so that we can still work with the - // VFS driver during development. This will not be used for actual running - // of Windows containers. Without this code, it would not be possible to - // docker pull using the VFS driver. - - // Store the layer. If layerData is not nil, unpack it into the new layer - if layerData != nil { - if err := graph.disassembleAndApplyTarLayer(img, layerData, root); err != nil { - return err - } - } - - if err := graph.saveSize(root, int(img.Size)); err != nil { - return err - } - - f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { - return err - } - - defer f.Close() - - return json.NewEncoder(f).Encode(img) -} - -// TarLayer returns a tar archive of the image's filesystem layer. 
-func (graph *Graph) TarLayer(img *image.Image) (arch archive.Archive, err error) { - if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok { - var ids []string - if img.Parent != "" { - parentImg, err := graph.Get(img.Parent) - if err != nil { - return nil, err - } - - ids, err = graph.ParentLayerIds(parentImg) - if err != nil { - return nil, err - } - } - - return wd.Export(img.ID, wd.LayerIdsToPaths(ids)) - } - // We keep this functionality here so that we can still work with the VFS - // driver during development. VFS is not supported (and just will not work) - // for Windows containers. - rdr, err := graph.assembleTarLayer(img) - if err != nil { - logrus.Debugf("[graph] TarLayer with traditional differ: %s", img.ID) - return graph.driver.Diff(img.ID, img.Parent) - } - return rdr, nil -} diff --git a/vendor/github.com/docker/docker/graph/history.go b/vendor/github.com/docker/docker/graph/history.go index 390a4873..de702987 100644 --- a/vendor/github.com/docker/docker/graph/history.go +++ b/vendor/github.com/docker/docker/graph/history.go @@ -9,9 +9,9 @@ import ( "github.com/docker/docker/utils" ) -// WalkHistory calls the handler function for each image in the +// walkHistory calls the handler function for each image in the // provided images lineage starting from immediate parent. -func (graph *Graph) WalkHistory(img *image.Image, handler func(image.Image) error) (err error) { +func (graph *Graph) walkHistory(img *image.Image, handler func(image.Image) error) (err error) { currentImg := img for currentImg != nil { if handler != nil { @@ -27,8 +27,7 @@ func (graph *Graph) WalkHistory(img *image.Image, handler func(image.Image) erro return nil } -// depth returns the number of parents for a -// current image +// depth returns the number of parents for the current image func (graph *Graph) depth(img *image.Image) (int, error) { var ( count = 0 @@ -38,18 +37,16 @@ func (graph *Graph) depth(img *image.Image) (int, error) { for parent != nil { count++ - parent, err = graph.GetParent(parent) - if err != nil { + if parent, err = graph.GetParent(parent); err != nil { return -1, err } } return count, nil } -// Set the max depth to the aufs default that most -// kernels are compiled with +// Set the max depth to the aufs default that most kernels are compiled with. // For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk -const MaxImageDepth = 127 +const maxImageDepth = 127 // CheckDepth returns an error if the depth of an image, as returned // by ImageDepth, is too large to support creating a container from it @@ -61,13 +58,14 @@ func (graph *Graph) CheckDepth(img *image.Image) error { if err != nil { return err } - if depth+2 >= MaxImageDepth { - return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) + if depth+2 >= maxImageDepth { + return fmt.Errorf("Cannot create container with more than %d parents", maxImageDepth) } return nil } -// History returns a list of ImageHistory for the specified image name by walking the image lineage. +// History returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. 
func (s *TagStore) History(name string) ([]*types.ImageHistory, error) { foundImage, err := s.LookupImage(name) if err != nil { @@ -87,7 +85,7 @@ func (s *TagStore) History(name string) ([]*types.ImageHistory, error) { history := []*types.ImageHistory{} - err = s.graph.WalkHistory(foundImage, func(img image.Image) error { + err = s.graph.walkHistory(foundImage, func(img image.Image) error { history = append(history, &types.ImageHistory{ ID: img.ID, Created: img.Created.Unix(), @@ -102,7 +100,7 @@ func (s *TagStore) History(name string) ([]*types.ImageHistory, error) { return history, err } -// GetParent returns the parent image. +// GetParent returns the parent image for the specified image. func (graph *Graph) GetParent(img *image.Image) (*image.Image, error) { if img.Parent == "" { return nil, nil @@ -110,12 +108,12 @@ func (graph *Graph) GetParent(img *image.Image) (*image.Image, error) { return graph.Get(img.Parent) } -// GetParentsSize returns the size of the parent. -func (graph *Graph) GetParentsSize(img *image.Image, size int64) int64 { +// getParentsSize returns the combined size of all parent images. If there is +// no parent image or it's unavailable, it returns 0. +func (graph *Graph) getParentsSize(img *image.Image) int64 { parentImage, err := graph.GetParent(img) if err != nil || parentImage == nil { - return size + return 0 } - size += parentImage.Size - return graph.GetParentsSize(parentImage, size) + return parentImage.Size + graph.getParentsSize(parentImage) } diff --git a/vendor/github.com/docker/docker/graph/import.go b/vendor/github.com/docker/docker/graph/import.go index 33207c9b..1a09dec4 100644 --- a/vendor/github.com/docker/docker/graph/import.go +++ b/vendor/github.com/docker/docker/graph/import.go @@ -5,7 +5,6 @@ import ( "net/http" "net/url" - "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" @@ -13,31 +12,21 @@ import ( "github.com/docker/docker/utils" ) -// ImageImportConfig holds configuration to import a image. -type ImageImportConfig struct { - // Changes are the container changes written to top layer. - Changes []string - // InConfig is the input stream containers layered data. - InConfig io.ReadCloser - // OutStream is the output stream where the image is written. - OutStream io.Writer - // ContainerConfig is the configuration of commit container. - ContainerConfig *runconfig.Config -} - -// Import allows to download image from a archive. -// If the src is a URL, the content is downloaded from the archive. If the source is '-' then the imageImportConfig.InConfig -// reader will be used to load the image. Once all the layers required are loaded locally, image is then tagged using the tag specified. -func (s *TagStore) Import(src string, repo string, tag string, imageImportConfig *ImageImportConfig) error { +// Import imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. 
+func (s *TagStore) Import(src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error { var ( sf = streamformatter.NewJSONStreamFormatter() - archive archive.ArchiveReader + archive io.ReadCloser resp *http.Response ) if src == "-" { - archive = imageImportConfig.InConfig + archive = inConfig } else { + inConfig.Close() u, err := url.Parse(src) if err != nil { return err @@ -47,25 +36,29 @@ func (s *TagStore) Import(src string, repo string, tag string, imageImportConfig u.Host = src u.Path = "" } - imageImportConfig.OutStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) resp, err = httputils.Download(u.String()) if err != nil { return err } progressReader := progressreader.New(progressreader.Config{ In: resp.Body, - Out: imageImportConfig.OutStream, + Out: outStream, Formatter: sf, - Size: int(resp.ContentLength), + Size: resp.ContentLength, NewLines: true, ID: "", Action: "Importing", }) - defer progressReader.Close() archive = progressReader } - img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, imageImportConfig.ContainerConfig) + defer archive.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + img, err := s.graph.Create(archive, "", "", msg, "", nil, containerConfig) if err != nil { return err } @@ -75,7 +68,7 @@ func (s *TagStore) Import(src string, repo string, tag string, imageImportConfig return err } } - imageImportConfig.OutStream.Write(sf.FormatStatus("", img.ID)) + outStream.Write(sf.FormatStatus("", img.ID)) logID := img.ID if tag != "" { logID = utils.ImageReference(logID, tag) diff --git a/vendor/github.com/docker/docker/graph/list.go b/vendor/github.com/docker/docker/graph/list.go index 28c83bc2..675110ff 100644 --- a/vendor/github.com/docker/docker/graph/list.go +++ b/vendor/github.com/docker/docker/graph/list.go @@ -18,25 +18,20 @@ var acceptedImageFilterTags = map[string]struct{}{ "label": {}, } -// ImagesConfig defines the criteria to obtain a list of images. -type ImagesConfig struct { - // Filters is supported list of filters used to get list of images. - Filters string - // Filter the list of images by name. - Filter string - // All inditest that all the images will be returned in the list, if set to true. - All bool -} - -// byCreated is a temporary type used to sort list of images on their field 'Created'. +// byCreated is a temporary type used to sort a list of images by creation +// time. type byCreated []*types.Image func (r byCreated) Len() int { return len(r) } func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } -// Images provide list of images based on selection criteria. -func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) { +// Images returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by pkg/parsers/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. 
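The filterArgs parameter of Images, defined next, is the JSON encoding understood by pkg/parsers/filters: a map from filter name to a list of values, as produced by the CLI. A small sketch of the expected input and of the MatchKVList call the listing code relies on, assuming the vendored filters API (the label values are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/filters"
)

func main() {
	// The same wire format a `docker images --filter` invocation produces.
	args, err := filters.FromParam(`{"dangling":["true"],"label":["maintainer=me"]}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(args["dangling"]) // [true]

	// MatchKVList is what the listing code uses for label filters.
	fmt.Println(args.MatchKVList("label", map[string]string{"maintainer": "me"})) // true
}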
+func (s *TagStore) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { var ( allImages map[string]*image.Image err error @@ -44,7 +39,7 @@ func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) { filtLabel = false ) - imageFilters, err := filters.FromParam(config.Filters) + imageFilters, err := filters.FromParam(filterArgs) if err != nil { return nil, err } @@ -56,30 +51,48 @@ func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) { if i, ok := imageFilters["dangling"]; ok { for _, value := range i { - if strings.ToLower(value) == "true" { + if v := strings.ToLower(value); v == "true" { filtTagged = false + } else if v != "false" { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", v) } } } _, filtLabel = imageFilters["label"] - if config.All && filtTagged { + if all && filtTagged { allImages = s.graph.Map() } else { - allImages = s.graph.Heads() + allImages = s.graph.heads() } lookup := make(map[string]*types.Image) s.Lock() for repoName, repository := range s.Repositories { - if config.Filter != "" { - if match, _ := path.Match(config.Filter, repoName); !match { + filterTagName := "" + if filter != "" { + filterName := filter + // Test if the tag was in there; if yes, get the name + if strings.Contains(filterName, ":") { + filterWithTag := strings.Split(filter, ":") + filterName = filterWithTag[0] + filterTagName = filterWithTag[1] + } + if match, _ := path.Match(filterName, repoName); !match { continue } + if filterTagName != "" { + if _, ok := repository[filterTagName]; !ok { + continue + } + } } for ref, id := range repository { imgRef := utils.ImageReference(repoName, ref) + if !strings.Contains(imgRef, filterTagName) { + continue + } image, err := s.graph.Get(id) if err != nil { logrus.Warnf("couldn't load %s from %s: %s", id, imgRef, err) @@ -97,17 +110,19 @@ func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) { } else { // get the boolean list for if only the untagged images are requested delete(allImages, id) - if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) { - continue + + if len(imageFilters["label"]) > 0 { + if image.Config == nil { + // Very old images that do not have image.Config (or even labels) + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", image.Config.Labels) { + continue + } } if filtTagged { - newImage := new(types.Image) - newImage.ParentId = image.Parent - newImage.ID = image.ID - newImage.Created = int(image.Created.Unix()) - newImage.Size = int(image.Size) - newImage.VirtualSize = int(s.graph.GetParentsSize(image, 0) + image.Size) - newImage.Labels = image.ContainerConfig.Labels + newImage := newImage(image, s.graph.getParentsSize(image)) if utils.DigestReference(ref) { newImage.RepoTags = []string{} @@ -131,20 +146,21 @@ func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) { } // Display images which aren't part of a repository/tag - if config.Filter == "" || filtLabel { + if filter == "" || filtLabel { for _, image := range allImages { - if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) { - continue + if len(imageFilters["label"]) > 0 { + if image.Config == nil { + // Very old images that do not have image.Config (or even labels) + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", image.Config.Labels) { + continue + } } - newImage := new(types.Image) - newImage.ParentId = image.Parent + newImage := newImage(image,
s.graph.getParentsSize(image)) newImage.RepoTags = []string{":"} newImage.RepoDigests = []string{"@"} - newImage.ID = image.ID - newImage.Created = int(image.Created.Unix()) - newImage.Size = int(image.Size) - newImage.VirtualSize = int(s.graph.GetParentsSize(image, 0) + image.Size) - newImage.Labels = image.ContainerConfig.Labels images = append(images, newImage) } @@ -154,3 +170,16 @@ func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) { return images, nil } + +func newImage(image *image.Image, parentSize int64) *types.Image { + newImage := new(types.Image) + newImage.ParentID = image.Parent + newImage.ID = image.ID + newImage.Created = image.Created.Unix() + newImage.Size = image.Size + newImage.VirtualSize = parentSize + image.Size + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/docker/docker/graph/load.go b/vendor/github.com/docker/docker/graph/load.go index 4d133c3a..3be3a8e8 100644 --- a/vendor/github.com/docker/docker/graph/load.go +++ b/vendor/github.com/docker/docker/graph/load.go @@ -64,14 +64,14 @@ func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error { } defer reposJSONFile.Close() - repositories := map[string]Repository{} + repositories := map[string]repository{} if err := json.NewDecoder(reposJSONFile).Decode(&repositories); err != nil { return err } for imageName, tagMap := range repositories { for tag, address := range tagMap { - if err := s.SetLoad(imageName, tag, address, true, outStream); err != nil { + if err := s.setLoad(imageName, tag, address, true, outStream); err != nil { return err } } @@ -106,17 +106,14 @@ func (s *TagStore) recursiveLoad(address, tmpImageDir string) error { } // ensure no two downloads of the same layer happen at the same time - if c, err := s.poolAdd("pull", "layer:"+img.ID); err != nil { - if c != nil { - logrus.Debugf("Image (id: %s) load is already running, waiting: %v", img.ID, err) - <-c - return nil - } - - return err + poolKey := "layer:" + img.ID + broadcaster, found := s.poolAdd("pull", poolKey) + if found { + logrus.Debugf("Image (id: %s) load is already running, waiting", img.ID) + return broadcaster.Wait() } - defer s.poolRemove("pull", "layer:"+img.ID) + defer s.poolRemove("pull", poolKey) if img.Parent != "" { if !s.graph.Exists(img.Parent) { @@ -125,11 +122,13 @@ func (s *TagStore) recursiveLoad(address, tmpImageDir string) error { } } } - if err := s.graph.Register(img, layer); err != nil { + if err := s.graph.Register(v1Descriptor{img}, layer); err != nil { return err } + logrus.Debugf("Completed processing %s", address) + return nil } - logrus.Debugf("Completed processing %s", address) + logrus.Debugf("already loaded %s", address) return nil } diff --git a/vendor/github.com/docker/docker/graph/mutex.go b/vendor/github.com/docker/docker/graph/mutex.go deleted file mode 100644 index a5f3991b..00000000 --- a/vendor/github.com/docker/docker/graph/mutex.go +++ /dev/null @@ -1,45 +0,0 @@ -package graph - -import "sync" - -// imageMutex provides a lock per image id to protect shared resources in the -// graph. This is only used with registration but should be used when -// manipulating the layer store. -type imageMutex struct { - mus map[string]*sync.Mutex // mutexes by image id. - mu sync.Mutex // protects lock map - - // NOTE(stevvooe): The map above will grow to the size of all images ever - // registered during a daemon run. To free these resources, we must - // deallocate after unlock. 
Doing this safely is non-trivial in the face - // of a very minor leak. -} - -// Lock the provided id. -func (im *imageMutex) Lock(id string) { - im.getImageLock(id).Lock() -} - -// Unlock the provided id. -func (im *imageMutex) Unlock(id string) { - im.getImageLock(id).Unlock() -} - -// getImageLock returns the mutex for the given id. This method will never -// return nil. -func (im *imageMutex) getImageLock(id string) *sync.Mutex { - im.mu.Lock() - defer im.mu.Unlock() - - if im.mus == nil { // lazy - im.mus = make(map[string]*sync.Mutex) - } - - mu, ok := im.mus[id] - if !ok { - mu = new(sync.Mutex) - im.mus[id] = mu - } - - return mu -} diff --git a/vendor/github.com/docker/docker/graph/pools_test.go b/vendor/github.com/docker/docker/graph/pools_test.go index 129a5e1f..6382c155 100644 --- a/vendor/github.com/docker/docker/graph/pools_test.go +++ b/vendor/github.com/docker/docker/graph/pools_test.go @@ -3,6 +3,7 @@ package graph import ( "testing" + "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/reexec" ) @@ -12,24 +13,21 @@ func init() { func TestPools(t *testing.T) { s := &TagStore{ - pullingPool: make(map[string]chan struct{}), - pushingPool: make(map[string]chan struct{}), + pullingPool: make(map[string]*broadcaster.Buffered), + pushingPool: make(map[string]*broadcaster.Buffered), } - if _, err := s.poolAdd("pull", "test1"); err != nil { - t.Fatal(err) + if _, found := s.poolAdd("pull", "test1"); found { + t.Fatal("Expected pull test1 not to be in progress") } - if _, err := s.poolAdd("pull", "test2"); err != nil { - t.Fatal(err) + if _, found := s.poolAdd("pull", "test2"); found { + t.Fatal("Expected pull test2 not to be in progress") } - if _, err := s.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") + if _, found := s.poolAdd("push", "test1"); !found { + t.Fatalf("Expected pull test1 to be in progress") } - if _, err := s.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - if _, err := s.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") + if _, found := s.poolAdd("pull", "test1"); !found { + t.Fatalf("Expected pull test1 to be in progress") } if err := s.poolRemove("pull", "test2"); err != nil { t.Fatal(err) @@ -43,7 +41,4 @@ func TestPools(t *testing.T) { if err := s.poolRemove("push", "test1"); err != nil { t.Fatal(err) } - if err := s.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } } diff --git a/vendor/github.com/docker/docker/graph/pull.go b/vendor/github.com/docker/docker/graph/pull.go index 0cfc457f..ce600479 100644 --- a/vendor/github.com/docker/docker/graph/pull.go +++ b/vendor/github.com/docker/docker/graph/pull.go @@ -13,16 +13,19 @@ import ( // ImagePullConfig stores pull configuration. type ImagePullConfig struct { - // MetaHeaders store meta data about the image (DockerHeaders with prefix X-Meta- in the request). + // MetaHeaders stores HTTP headers with metadata about the image + // (DockerHeaders with prefix X-Meta- in the request). MetaHeaders map[string][]string - // AuthConfig holds authentication information for authorizing with the registry. + // AuthConfig holds authentication credentials for authenticating with + // the registry.
AuthConfig *cliconfig.AuthConfig - // OutStream is the output writer for showing the status of the pull operation. + // OutStream is the output writer for showing the status of the pull + // operation. OutStream io.Writer } -// Puller is an interface to define Pull behavior. -type Puller interface { +// puller is an interface that abstracts pulling for different API versions. +type puller interface { // Pull tries to pull the image referenced by `tag` // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. // @@ -30,8 +33,12 @@ type Puller interface { Pull(tag string) (fallback bool, err error) } -// NewPuller returns a new instance of an implementation conforming to Puller interface. -func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig, sf *streamformatter.StreamFormatter) (Puller, error) { +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig, sf *streamformatter.StreamFormatter) (puller, error) { switch endpoint.Version { case registry.APIVersion2: return &v2Puller{ @@ -53,7 +60,8 @@ func NewPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.Re return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) } -// Pull downloads a image with specified name and tag from the repo. +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error { var sf = streamformatter.NewJSONStreamFormatter() @@ -68,7 +76,7 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf return err } - endpoints, err := s.registryService.LookupEndpoints(repoInfo.CanonicalName) + endpoints, err := s.registryService.LookupPullEndpoints(repoInfo.CanonicalName) if err != nil { return err } @@ -93,13 +101,7 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf for _, endpoint := range endpoints { logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version) - if !endpoint.Mirror && (endpoint.Official || endpoint.Version == registry.APIVersion2) { - if repoInfo.Official { - s.trustService.UpdateBase() - } - } - - puller, err := NewPuller(s, endpoint, repoInfo, imagePullConfig, sf) + puller, err := newPuller(s, endpoint, repoInfo, imagePullConfig, sf) if err != nil { lastErr = err continue @@ -133,7 +135,10 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf return lastErr } -// writeStatus shows status of the pull command. +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. 
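Before writeStatus below, it helps to see the shape of the pool rework that runs through this whole patch: poolAdd now returns (broadcaster, found) instead of (channel, error), so the first caller for a key does the work and later callers attach and wait. A minimal stdlib-only model of that single-flight behavior; flight and pool are stand-ins, and the real broadcaster.Buffered additionally buffers progress output so late joiners still see it, which this sketch omits:

    package main

    import (
        "fmt"
        "sync"
    )

    // flight plays the role of broadcaster.Buffered: it carries the outcome
    // of one in-progress download to everyone who asked for the same key.
    type flight struct {
        done chan struct{}
        err  error
    }

    func (f *flight) wait() error            { <-f.done; return f.err }
    func (f *flight) closeWithError(e error) { f.err = e; close(f.done) }

    type pool struct {
        mu sync.Mutex
        m  map[string]*flight
    }

    // add mirrors the new poolAdd shape: it reports whether a flight for
    // this key was already in progress.
    func (p *pool) add(key string) (*flight, bool) {
        p.mu.Lock()
        defer p.mu.Unlock()
        if f, ok := p.m[key]; ok {
            return f, true
        }
        f := &flight{done: make(chan struct{})}
        p.m[key] = f
        return f, false
    }

    func main() {
        p := &pool{m: map[string]*flight{}}
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func(n int) {
                defer wg.Done()
                f, found := p.add("layer:abc")
                if found {
                    fmt.Println(n, "waiting:", f.wait())
                    return
                }
                fmt.Println(n, "downloading")
                f.closeWithError(nil) // wakes every waiter with the result
            }(i)
        }
        wg.Wait()
    }

Exactly one goroutine prints "downloading"; the others block in wait() until the winner closes the flight.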
func writeStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) { if layersDownloaded { out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) diff --git a/vendor/github.com/docker/docker/graph/pull_v1.go b/vendor/github.com/docker/docker/graph/pull_v1.go index 5eb75372..51150dbe 100644 --- a/vendor/github.com/docker/docker/graph/pull_v1.go +++ b/vendor/github.com/docker/docker/graph/pull_v1.go @@ -3,6 +3,7 @@ package graph import ( "errors" "fmt" + "io" "net" "net/url" "strings" @@ -30,7 +31,7 @@ type v1Puller struct { func (p *v1Puller) Pull(tag string) (fallback bool, err error) { if utils.DigestReference(tag) { // Allowing fallback, because HTTPS v1 is before HTTP v2 - return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")} + return true, registry.ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")} } tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name) @@ -59,6 +60,9 @@ func (p *v1Puller) Pull(tag string) (fallback bool, err error) { // TODO(dmcgowan): Check if should fallback return false, err } + out := p.config.OutStream + out.Write(p.sf.FormatStatus("", "%s: this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.", p.repoInfo.CanonicalName)) + return false, nil } @@ -123,7 +127,7 @@ func (p *v1Puller) pullRepository(askedTag string) error { defer func() { p.graph.Release(sessionID, imgIDs...) }() - for _, image := range repoData.ImgList { + for _, imgData := range repoData.ImgList { downloadImage := func(img *registry.ImgData) { if askedTag != "" && img.Tag != askedTag { errors <- nil @@ -136,32 +140,33 @@ func (p *v1Puller) pullRepository(askedTag string) error { return } - // ensure no two downloads of the same image happen at the same time - if c, err := p.poolAdd("pull", "img:"+img.ID); err != nil { - if c != nil { - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. 
Waiting.", nil)) - <-c - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil)) - } else { - logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) - } - errors <- nil + if err := image.ValidateID(img.ID); err != nil { + errors <- err return } - defer p.poolRemove("pull", "img:"+img.ID) + + // ensure no two downloads of the same image happen at the same time + poolKey := "img:" + img.ID + broadcaster, found := p.poolAdd("pull", poolKey) + broadcaster.Add(out) + if found { + errors <- broadcaster.Wait() + return + } + defer p.poolRemove("pull", poolKey) // we need to retain it until tagging p.graph.Retain(sessionID, img.ID) imgIDs = append(imgIDs, img.ID) - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil)) success := false var lastErr, err error var isDownloaded bool for _, ep := range p.repoInfo.Index.Mirrors { ep += "v1/" - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) - if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil { + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) + if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil { // Don't report errors when pulling from mirrors. logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err) continue @@ -172,12 +177,12 @@ func (p *v1Puller) pullRepository(askedTag string) error { } if !success { for _, ep := range repoData.Endpoints { - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) - if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil { + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) + if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil { // It's not ideal that only the last error is returned, it would be better to concatenate the errors. // As the error is also given to the output stream the user will see the error. 
lastErr = err - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil)) continue } layersDownloaded = layersDownloaded || isDownloaded @@ -187,16 +192,17 @@ func (p *v1Puller) pullRepository(askedTag string) error { } if !success { err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr) - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil)) errors <- err + broadcaster.CloseWithError(err) return } - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil)) errors <- nil } - go downloadImage(image) + go downloadImage(imgData) } var lastError error @@ -226,12 +232,12 @@ func (p *v1Puller) pullRepository(askedTag string) error { return nil } -func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, error) { - history, err := p.session.GetRemoteHistory(imgID, endpoint) +func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string) (layersDownloaded bool, err error) { + var history []string + history, err = p.session.GetRemoteHistory(imgID, endpoint) if err != nil { return false, err } - out := p.config.OutStream out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil)) // FIXME: Try to stream the images? // FIXME: Launch the getRemoteImage() in goroutines @@ -241,22 +247,34 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro p.graph.Retain(sessionID, history[1:]...) defer p.graph.Release(sessionID, history[1:]...) - layersDownloaded := false + layersDownloaded = false for i := len(history) - 1; i >= 0; i-- { id := history[i] // ensure no two downloads of the same layer happen at the same time - if c, err := p.poolAdd("pull", "layer:"+id); err != nil { - logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err) - <-c + poolKey := "layer:" + id + broadcaster, found := p.poolAdd("pull", poolKey) + broadcaster.Add(out) + if found { + logrus.Debugf("Image (id: %s) pull is already running, skipping", id) + err = broadcaster.Wait() + if err != nil { + return layersDownloaded, err + } + continue } - defer p.poolRemove("pull", "layer:"+id) + + // This must use a closure so it captures the value of err when + // the function returns, not when the 'defer' is evaluated. 
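The closure comment above is worth a standalone demonstration, because the trap is subtle: defer evaluates its arguments at the defer statement, so passing err directly snapshots nil, while a closure reads the value err holds when the function actually returns. Runnable with nothing but the standard library:

    package main

    import (
        "errors"
        "fmt"
    )

    func report(label string, err error) { fmt.Println(label, "->", err) }

    func run() (err error) {
        // Wrong: err is evaluated here, while it is still nil.
        defer report("eager", err)
        // Right: the closure reads err when run() returns.
        defer func() { report("closure", err) }()
        err = errors.New("pull failed")
        return err
    }

    func main() {
        run() // "closure -> pull failed", then "eager -> <nil>" (defers run LIFO)
    }

That is precisely why poolRemoveWithError is wrapped in a closure here rather than deferred directly.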
+ defer func() { + p.poolRemoveWithError("pull", poolKey, err) + }() if !p.graph.Exists(id) { - out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil)) var ( imgJSON []byte - imgSize int + imgSize int64 err error img *image.Image ) @@ -264,7 +282,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro for j := 1; j <= retries; j++ { imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint) if err != nil && j == retries { - out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) return layersDownloaded, err } else if err != nil { time.Sleep(time.Duration(j) * 500 * time.Millisecond) @@ -273,7 +291,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro img, err = image.NewImgJSON(imgJSON) layersDownloaded = true if err != nil && j == retries { - out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err) } else if err != nil { time.Sleep(time.Duration(j) * 500 * time.Millisecond) @@ -289,8 +307,8 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro if j > 1 { status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) } - out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil)) - layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil)) + layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize) if uerr, ok := err.(*url.Error); ok { err = uerr.Err } @@ -298,16 +316,16 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else if err != nil { - out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) return layersDownloaded, err } layersDownloaded = true defer layer.Close() - err = p.graph.Register(img, + err = p.graph.Register(v1Descriptor{img}, progressreader.New(progressreader.Config{ In: layer, - Out: out, + Out: broadcaster, Formatter: p.sf, Size: imgSize, NewLines: false, @@ -318,14 +336,15 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else if err != nil { - out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil)) return layersDownloaded, err } else { break } } } - out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil)) + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil)) + broadcaster.Close() } return layersDownloaded, nil } diff --git a/vendor/github.com/docker/docker/graph/pull_v2.go b/vendor/github.com/docker/docker/graph/pull_v2.go index 6c2fa206..9eb0b5dd 100644 --- a/vendor/github.com/docker/docker/graph/pull_v2.go +++ 
b/vendor/github.com/docker/docker/graph/pull_v2.go @@ -1,6 +1,8 @@ package graph import ( + "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -9,15 +11,14 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/docker/image" + "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/registry" - "github.com/docker/docker/trust" "github.com/docker/docker/utils" - "github.com/docker/libtrust" "golang.org/x/net/context" ) @@ -33,9 +34,9 @@ type v2Puller struct { func (p *v2Puller) Pull(tag string) (fallback bool, err error) { // TODO(tiborvass): was ReceiveTimeout - p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig) + p.repo, err = newV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { - logrus.Debugf("Error getting v2 registry: %v", err) + logrus.Warnf("Error getting v2 registry: %v", err) return true, err } @@ -72,43 +73,101 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) { } - c, err := p.poolAdd("pull", taggedName) - if err != nil { - if c != nil { - // Another pull of the same repository is already taking place; just wait for it to finish - p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName) - <-c - return nil - } - return err + poolKey := "v2:" + taggedName + broadcaster, found := p.poolAdd("pull", poolKey) + broadcaster.Add(p.config.OutStream) + if found { + // Another pull of the same repository is already taking place; just wait for it to finish + return broadcaster.Wait() } - defer p.poolRemove("pull", taggedName) + + // This must use a closure so it captures the value of err when the + // function returns, not when the 'defer' is evaluated. + defer func() { + p.poolRemoveWithError("pull", poolKey, err) + }() var layersDownloaded bool for _, tag := range tags { // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? - pulledNew, err := p.pullV2Tag(tag, taggedName) + pulledNew, err := p.pullV2Tag(broadcaster, tag, taggedName) if err != nil { return err } layersDownloaded = layersDownloaded || pulledNew } - writeStatus(taggedName, p.config.OutStream, p.sf, layersDownloaded) + writeStatus(taggedName, broadcaster, p.sf, layersDownloaded) return nil } // downloadInfo is used to pass information from download to extractor type downloadInfo struct { - img *image.Image - tmpFile *os.File - digest digest.Digest - layer distribution.ReadSeekCloser - size int64 - err chan error - verified bool + img contentAddressableDescriptor + imgIndex int + tmpFile *os.File + digest digest.Digest + layer distribution.ReadSeekCloser + size int64 + err chan error + poolKey string + broadcaster *broadcaster.Buffered +} + +// contentAddressableDescriptor is used to pass image data from a manifest to the +// graph. 
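contentAddressableDescriptor, defined just below, pairs each layer with a strongID derived from the image content itself. The sketch underneath shows only the property being relied on, not the actual image.MakeImageConfig/image.StrongID computation: hashing canonical config bytes means identical content always yields the identical ID.

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    // strongID illustrates content addressing: the identifier is a digest
    // of the canonical config bytes.
    func strongID(config []byte) string {
        return fmt.Sprintf("%x", sha256.Sum256(config))
    }

    func main() {
        cfg := []byte(`{"cmd":["/bin/sh"],"layer":"sha256:..."}`)
        cfgCopy := append([]byte(nil), cfg...)
        fmt.Println(strongID(cfg))
        fmt.Println(strongID(cfg) == strongID(cfgCopy)) // true: same bytes, same ID
    }

That determinism is what lets the puller revalidate images already present in the graph instead of trusting their names.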
+type contentAddressableDescriptor struct { + id string + parent string + strongID digest.Digest + compatibilityID string + config []byte + v1Compatibility []byte +} + +func newContentAddressableImage(v1Compatibility []byte, blobSum digest.Digest, parent digest.Digest) (contentAddressableDescriptor, error) { + img := contentAddressableDescriptor{ + v1Compatibility: v1Compatibility, + } + + var err error + img.config, err = image.MakeImageConfig(v1Compatibility, blobSum, parent) + if err != nil { + return img, err + } + img.strongID, err = image.StrongID(img.config) + if err != nil { + return img, err + } + + unmarshalledConfig, err := image.NewImgJSON(v1Compatibility) + if err != nil { + return img, err + } + + img.compatibilityID = unmarshalledConfig.ID + img.id = img.strongID.Hex() + + return img, nil +} + +// ID returns the actual ID to be used for the downloaded image. This may be +// a computed ID. +func (img contentAddressableDescriptor) ID() string { + return img.id +} + +// Parent returns the parent ID to be used for the image. This may be a +// computed ID. +func (img contentAddressableDescriptor) Parent() string { + return img.parent +} + +// MarshalConfig renders the image structure into JSON. +func (img contentAddressableDescriptor) MarshalConfig() ([]byte, error) { + return img.config, nil } type errVerification struct{} @@ -116,32 +175,11 @@ type errVerification struct{} func (errVerification) Error() string { return "verification failed" } func (p *v2Puller) download(di *downloadInfo) { - logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID) + logrus.Debugf("pulling blob %q to %s", di.digest, di.img.id) - out := p.config.OutStream + blobs := p.repo.Blobs(context.Background()) - if c, err := p.poolAdd("pull", "img:"+di.img.ID); err != nil { - if c != nil { - out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Layer already being pulled by another client. 
Waiting.", nil)) - <-c - out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil)) - } else { - logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", di.img.ID, err) - } - di.err <- nil - return - } - - defer p.poolRemove("pull", "img:"+di.img.ID) - tmpFile, err := ioutil.TempFile("", "GetImageBlob") - if err != nil { - di.err <- err - return - } - - blobs := p.repo.Blobs(nil) - - desc, err := blobs.Stat(nil, di.digest) + desc, err := blobs.Stat(context.Background(), di.digest) if err != nil { logrus.Debugf("Error statting layer: %v", err) di.err <- err @@ -149,7 +187,7 @@ func (p *v2Puller) download(di *downloadInfo) { } di.size = desc.Size - layerDownload, err := blobs.Open(nil, di.digest) + layerDownload, err := blobs.Open(context.Background(), di.digest) if err != nil { logrus.Debugf("Error fetching layer: %v", err) di.err <- err @@ -165,137 +203,208 @@ func (p *v2Puller) download(di *downloadInfo) { reader := progressreader.New(progressreader.Config{ In: ioutil.NopCloser(io.TeeReader(layerDownload, verifier)), - Out: out, + Out: di.broadcaster, Formatter: p.sf, - Size: int(di.size), + Size: di.size, NewLines: false, - ID: stringid.TruncateID(di.img.ID), + ID: stringid.TruncateID(di.img.id), Action: "Downloading", }) - io.Copy(tmpFile, reader) + io.Copy(di.tmpFile, reader) - out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil)) + di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Verifying Checksum", nil)) - di.verified = verifier.Verified() - if !di.verified { - logrus.Infof("Image verification failed for layer %s", di.digest) + if !verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest) + logrus.Error(err) + di.err <- err + return } - out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil)) + di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Download complete", nil)) - logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name()) - di.tmpFile = tmpFile + logrus.Debugf("Downloaded %s to tempfile %s", di.img.id, di.tmpFile.Name()) di.layer = layerDownload di.err <- nil } -func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) { +func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (tagUpdated bool, err error) { logrus.Debugf("Pulling tag from V2 registry: %q", tag) - out := p.config.OutStream manSvc, err := p.repo.Manifests(context.Background()) if err != nil { return false, err } - manifest, err := manSvc.GetByTag(tag) + unverifiedManifest, err := manSvc.GetByTag(tag) if err != nil { return false, err } - verified, err := p.validateManifest(manifest, tag) + if unverifiedManifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag %q", tag) + } + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifyManifest(unverifiedManifest, tag) if err != nil { return false, err } - if verified { - logrus.Printf("Image manifest for %s has been verified", taggedName) + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return false, err + } + + imgs, err := p.getImageInfos(verifiedManifest) + if err != nil { + return false, err } out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name())) - downloads := make([]downloadInfo, len(manifest.FSLayers)) + var downloads []*downloadInfo - layerIDs := []string{} + var layerIDs []string defer 
func() { p.graph.Release(p.sessionID, layerIDs...) + + for _, d := range downloads { + p.poolRemoveWithError("pull", d.poolKey, err) + if d.tmpFile != nil { + d.tmpFile.Close() + if err := os.RemoveAll(d.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name()) + } + } + } }() - for i := len(manifest.FSLayers) - 1; i >= 0; i-- { - img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility)) - if err != nil { - logrus.Debugf("error getting image v1 json: %v", err) - return false, err - } - downloads[i].img = img - downloads[i].digest = manifest.FSLayers[i].BlobSum + for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + img := imgs[i] - p.graph.Retain(p.sessionID, img.ID) - layerIDs = append(layerIDs, img.ID) + p.graph.Retain(p.sessionID, img.id) + layerIDs = append(layerIDs, img.id) + + p.graph.imageMutex.Lock(img.id) // Check if exists - if p.graph.Exists(img.ID) { - logrus.Debugf("Image already exists: %s", img.ID) + if p.graph.Exists(img.id) { + if err := p.validateImageInGraph(img.id, imgs, i); err != nil { + p.graph.imageMutex.Unlock(img.id) + return false, fmt.Errorf("image validation failed: %v", err) + } + logrus.Debugf("Image already exists: %s", img.id) + p.graph.imageMutex.Unlock(img.id) + continue + } + p.graph.imageMutex.Unlock(img.id) + + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.id), "Pulling fs layer", nil)) + + d := &downloadInfo{ + img: img, + imgIndex: i, + poolKey: "v2layer:" + img.id, + digest: verifiedManifest.FSLayers[i].BlobSum, + // TODO: seems like this chan buffer solved hanging problem in go1.5, + // this can indicate some deeper problem that somehow we never take + // error from channel in loop below + err: make(chan error, 1), + } + + tmpFile, err := ioutil.TempFile("", "GetImageBlob") + if err != nil { + return false, err + } + d.tmpFile = tmpFile + + downloads = append(downloads, d) + + broadcaster, found := p.poolAdd("pull", d.poolKey) + broadcaster.Add(out) + d.broadcaster = broadcaster + if found { + d.err <- nil + } else { + go p.download(d) + } + } + + for _, d := range downloads { + if err := <-d.err; err != nil { + return false, err + } + + if d.layer == nil { + // Wait for a different pull to download and extract + // this layer. + err = d.broadcaster.Wait() + if err != nil { + return false, err + } continue } - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil)) + d.tmpFile.Seek(0, 0) + err := func() error { + reader := progressreader.New(progressreader.Config{ + In: d.tmpFile, + Out: d.broadcaster, + Formatter: p.sf, + Size: d.size, + NewLines: false, + ID: stringid.TruncateID(d.img.id), + Action: "Extracting", + }) - downloads[i].err = make(chan error) - go p.download(&downloads[i]) + p.graph.imagesMutex.Lock() + defer p.graph.imagesMutex.Unlock() + + p.graph.imageMutex.Lock(d.img.id) + defer p.graph.imageMutex.Unlock(d.img.id) + + // Must recheck the data on disk if any exists. + // This protects against races where something + // else is written to the graph under this ID + // after attemptIDReuse. 
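The recheck described above is the classic check-lock-recheck idiom: existence is tested optimistically while planning downloads, then tested again under imageMutex before registering, so a concurrent pull that won the race is detected rather than clobbered. The idiom in isolation, with a map standing in for the on-disk graph:

    package main

    import (
        "fmt"
        "sync"
    )

    type store struct {
        mu    sync.Mutex
        items map[string][]byte
    }

    // register writes content for id unless a concurrent caller already
    // did; the existence check runs under the lock, as in the pull path.
    func (s *store) register(id string, content []byte) bool {
        s.mu.Lock()
        defer s.mu.Unlock()
        if _, exists := s.items[id]; exists {
            return false // lost the race; validate the existing entry instead
        }
        s.items[id] = content
        return true
    }

    func main() {
        s := &store{items: map[string][]byte{}}
        var wg sync.WaitGroup
        wins := make(chan bool, 10)
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func() { defer wg.Done(); wins <- s.register("layer:abc", []byte("x")) }()
        }
        wg.Wait()
        close(wins)
        n := 0
        for w := range wins {
            if w {
                n++
            }
        }
        fmt.Println("registrations:", n) // always 1
    }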
+ if p.graph.Exists(d.img.id) { + if err := p.validateImageInGraph(d.img.id, imgs, d.imgIndex); err != nil { + return fmt.Errorf("image validation failed: %v", err) + } + } + + if err := p.graph.register(d.img, reader); err != nil { + return err + } + + if err := p.graph.setLayerDigest(d.img.id, d.digest); err != nil { + return err + } + + if err := p.graph.setV1CompatibilityConfig(d.img.id, d.img.v1Compatibility); err != nil { + return err + } + + return nil + }() + if err != nil { + return false, err + } + + d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.id), "Pull complete", nil)) + d.broadcaster.Close() + tagUpdated = true } - var tagUpdated bool - for i := len(downloads) - 1; i >= 0; i-- { - d := &downloads[i] - if d.err != nil { - if err := <-d.err; err != nil { - return false, err - } - } - verified = verified && d.verified - if d.layer != nil { - // if tmpFile is empty assume download and extracted elsewhere - defer os.Remove(d.tmpFile.Name()) - defer d.tmpFile.Close() - d.tmpFile.Seek(0, 0) - if d.tmpFile != nil { - - reader := progressreader.New(progressreader.Config{ - In: d.tmpFile, - Out: out, - Formatter: p.sf, - Size: int(d.size), - NewLines: false, - ID: stringid.TruncateID(d.img.ID), - Action: "Extracting", - }) - - err = p.graph.Register(d.img, reader) - if err != nil { - return false, err - } - - if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil { - return false, err - } - - // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted) - } - out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil)) - tagUpdated = true - } else { - out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil)) - } - } - - manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName) + manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName) if err != nil { return false, err } // Check for new tag if no layers downloaded if !tagUpdated { - repo, err := p.Get(p.repoInfo.LocalName) + repo, err := p.get(p.repoInfo.LocalName) if err != nil { return false, err } @@ -308,21 +417,18 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) { } } - if verified && tagUpdated { - out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.")) - } - + firstID := layerIDs[len(layerIDs)-1] if utils.DigestReference(tag) { // TODO(stevvooe): Ideally, we should always set the digest so we can // use the digest whether we pull by it or not. Unfortunately, the tag // store treats the digest as a separate tag, meaning there may be an // untagged digest image that would seem to be dangling by a user. - if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.ID); err != nil { + if err = p.setDigest(p.repoInfo.LocalName, tag, firstID); err != nil { return false, err } } else { // only set the repository/tag -> image ID mapping when pulling by tag (i.e. 
not by digest) - if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil { + if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil { return false, err } } @@ -334,76 +440,259 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) { return tagUpdated, nil } -// verifyTrustedKeys checks the keys provided against the trust store, -// ensuring that the provided keys are trusted for the namespace. The keys -// provided from this method must come from the signatures provided as part of -// the manifest JWS package, obtained from unpackSignedManifest or libtrust. -func (p *v2Puller) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) { - if namespace[0] != '/' { - namespace = "/" + namespace - } - - for _, key := range keys { - b, err := key.MarshalJSON() +func verifyManifest(signedManifest *schema1.SignedManifest, tag string) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if manifestDigest, err := digest.ParseDigest(tag); err == nil { + verifier, err := digest.NewDigestVerifier(manifestDigest) if err != nil { - return false, fmt.Errorf("error marshalling public key: %s", err) + return nil, err } - // Check key has read/write permission (0x03) - v, err := p.trustService.CheckKey(namespace, b, 0x03) + payload, err := signedManifest.Payload() if err != nil { - vErr, ok := err.(trust.NotVerifiedError) - if !ok { - return false, fmt.Errorf("error running key check: %s", err) - } - logrus.Debugf("Key check result: %v", vErr) - } - verified = v - } - - if verified { - logrus.Debug("Key check result: verified") - } - - return -} - -func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) { - // TODO(tiborvass): what's the usecase for having manifest == nil and err == nil ? Shouldn't be the error be "DoesNotExist" ? - if m == nil { - return false, fmt.Errorf("image manifest does not exist for tag %q", tag) - } - if m.SchemaVersion != 1 { - return false, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag) - } - if len(m.FSLayers) != len(m.History) { - return false, fmt.Errorf("length of history not equal to number of layers for tag %q", tag) - } - if len(m.FSLayers) == 0 { - return false, fmt.Errorf("no FSLayers in manifest for tag %q", tag) - } - keys, err := manifest.Verify(m) - if err != nil { - return false, fmt.Errorf("error verifying manifest for tag %q: %v", tag, err) - } - verified, err = p.verifyTrustedKeys(m.Name, keys) - if err != nil { - return false, fmt.Errorf("error verifying manifest keys: %v", err) - } - localDigest, err := digest.ParseDigest(tag) - // if pull by digest, then verify - if err == nil { - verifier, err := digest.NewDigestVerifier(localDigest) - if err != nil { - return false, err - } - payload, err := m.Payload() - if err != nil { - return false, err + // If this failed, the signatures section was corrupted + // or missing. Treat the entire manifest as the payload. 
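A step back before the fallback below: when pulling by digest, the tag is itself the content address, so verifyManifest hashes whatever payload it settles on and compares it with the digest the client asked for. The same check approximated with stdlib primitives (the vendored code goes through digest.NewDigestVerifier rather than a direct comparison):

    package main

    import (
        "crypto/sha256"
        "fmt"
        "strings"
    )

    // verifyPayload checks payload against a "sha256:<hex>" reference,
    // the moral equivalent of NewDigestVerifier + Write + Verified.
    func verifyPayload(ref string, payload []byte) bool {
        const prefix = "sha256:"
        if !strings.HasPrefix(ref, prefix) {
            return false
        }
        sum := fmt.Sprintf("%x", sha256.Sum256(payload))
        return sum == strings.TrimPrefix(ref, prefix)
    }

    func main() {
        payload := []byte(`{"schemaVersion":1}`)
        ref := fmt.Sprintf("sha256:%x", sha256.Sum256(payload))
        fmt.Println(verifyPayload(ref, payload))            // true
        fmt.Println(verifyPayload(ref, []byte("tampered"))) // false
    }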
+ payload = signedManifest.Raw } if _, err := verifier.Write(payload); err != nil { - return false, err + return nil, err } - verified = verified && verifier.Verified() + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", manifestDigest) + logrus.Error(err) + return nil, err + } + + var verifiedManifest schema1.Manifest + if err = json.Unmarshal(payload, &verifiedManifest); err != nil { + return nil, err + } + m = &verifiedManifest + } else { + m = &signedManifest.Manifest } - return verified, nil + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for tag %q", tag) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for tag %q", tag) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + images := make([]*image.Image, len(m.FSLayers)) + for i := range m.FSLayers { + img, err := image.NewImgJSON([]byte(m.History[i].V1Compatibility)) + if err != nil { + return err + } + images[i] = img + if err := image.ValidateID(img.ID); err != nil { + return err + } + } + + if images[len(images)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range images { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(images) - 2; i >= 0; i-- { + if images[i].ID == images[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if images[i].Parent != images[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", images[i+1].ID, images[i].Parent) + } + } + + return nil +} + +// getImageInfos returns an imageinfo struct for every image in the manifest. +// These objects contain both calculated strongIDs and compatibilityIDs found +// in v1Compatibility object. +func (p *v2Puller) getImageInfos(m *schema1.Manifest) ([]contentAddressableDescriptor, error) { + imgs := make([]contentAddressableDescriptor, len(m.FSLayers)) + + var parent digest.Digest + for i := len(imgs) - 1; i >= 0; i-- { + var err error + imgs[i], err = newContentAddressableImage([]byte(m.History[i].V1Compatibility), m.FSLayers[i].BlobSum, parent) + if err != nil { + return nil, err + } + parent = imgs[i].strongID + } + + p.attemptIDReuse(imgs) + + return imgs, nil +} + +// attemptIDReuse does a best attempt to match verified compatibilityIDs +// already in the graph with the computed strongIDs so we can keep using them. +// This process will never fail but may just return the strongIDs if none of +// the compatibilityIDs exists or can be verified. If the strongIDs themselves +// fail verification, we deterministically generate alternate IDs to use until +// we find one that's available or already exists with the correct data. 
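The "deterministically generate alternate IDs" strategy in the comment above (implemented by tryNextID further down) behaves like probing in a hash table, with a hash of the previous ID as the step function. Reduced to its core; this sketch uses SHA-256 directly, where the vendored code goes through digest.FromBytes:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func rehash(id string) string {
        return fmt.Sprintf("%x", sha256.Sum256([]byte(id)))
    }

    // nextFreeID probes SHA256(id), SHA256(SHA256(id)), ... until an ID is
    // unused. Deterministic, so every daemon derives the same chain.
    func nextFreeID(id string, taken map[string]bool) string {
        for taken[id] {
            id = rehash(id)
        }
        return id
    }

    func main() {
        taken := map[string]bool{"aaa": true, rehash("aaa"): true}
        fmt.Println(nextFreeID("aaa", taken)) // the second rehash of "aaa"
    }

The real code additionally validates any existing graph entry it lands on before reusing it, rather than treating mere existence as a collision.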
+func (p *v2Puller) attemptIDReuse(imgs []contentAddressableDescriptor) { + // This function needs to be protected with a global lock, because it + // locks multiple IDs at once, and there's no good way to make sure + // the locking happens a deterministic order. + p.graph.imagesMutex.Lock() + defer p.graph.imagesMutex.Unlock() + + idMap := make(map[string]struct{}) + for _, img := range imgs { + idMap[img.id] = struct{}{} + idMap[img.compatibilityID] = struct{}{} + + if p.graph.Exists(img.compatibilityID) { + if _, err := p.graph.generateV1CompatibilityChain(img.compatibilityID); err != nil { + logrus.Debugf("Migration v1Compatibility generation error: %v", err) + return + } + } + } + for id := range idMap { + p.graph.imageMutex.Lock(id) + defer p.graph.imageMutex.Unlock(id) + } + + // continueReuse controls whether the function will try to find + // existing layers on disk under the old v1 IDs, to avoid repulling + // them. The hashes are checked to ensure these layers are okay to + // use. continueReuse starts out as true, but is set to false if + // the code encounters something that doesn't match the expected hash. + continueReuse := true + + for i := len(imgs) - 1; i >= 0; i-- { + if p.graph.Exists(imgs[i].id) { + // Found an image in the graph under the strongID. Validate the + // image before using it. + if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil { + continueReuse = false + logrus.Debugf("not using existing strongID: %v", err) + + // The strong ID existed in the graph but didn't + // validate successfully. We can't use the strong ID + // because it didn't validate successfully. Treat the + // graph like a hash table with probing... compute + // SHA256(id) until we find an ID that either doesn't + // already exist in the graph, or has existing content + // that validates successfully. + for { + if err := p.tryNextID(imgs, i, idMap); err != nil { + logrus.Debug(err.Error()) + } else { + break + } + } + } + continue + } + + if continueReuse { + compatibilityID := imgs[i].compatibilityID + if err := p.validateImageInGraph(compatibilityID, imgs, i); err != nil { + logrus.Debugf("stopping ID reuse: %v", err) + continueReuse = false + } else { + // The compatibility ID exists in the graph and was + // validated. Use it. + imgs[i].id = compatibilityID + } + } + } + + // fix up the parents of the images + for i := 0; i < len(imgs); i++ { + if i == len(imgs)-1 { // Base layer + imgs[i].parent = "" + } else { + imgs[i].parent = imgs[i+1].id + } + } +} + +// validateImageInGraph checks that an image in the graph has the expected +// strongID. id is the entry in the graph to check, imgs is the slice of +// images being processed (for access to the parent), and i is the index +// into this slice which the graph entry should be checked against. 
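One more note on the loop that closes attemptIDReuse above, before validateImageInGraph below: whatever mix of strong and compatibility IDs survived, parents are relinked from each entry's successor, and the base layer (last in manifest order) gets an empty parent. The fix-up in isolation:

    package main

    import "fmt"

    type desc struct{ id, parent string }

    // fixParents points every image at its successor's (possibly reused)
    // ID; the last entry is the base layer and gets no parent.
    func fixParents(imgs []desc) {
        for i := range imgs {
            if i == len(imgs)-1 {
                imgs[i].parent = ""
            } else {
                imgs[i].parent = imgs[i+1].id
            }
        }
    }

    func main() {
        imgs := []desc{{id: "top"}, {id: "mid"}, {id: "base"}}
        fixParents(imgs)
        fmt.Printf("%+v\n", imgs) // [{id:top parent:mid} {id:mid parent:base} {id:base parent:}]
    }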
+func (p *v2Puller) validateImageInGraph(id string, imgs []contentAddressableDescriptor, i int) error { + img, err := p.graph.Get(id) + if err != nil { + return fmt.Errorf("missing: %v", err) + } + layerID, err := p.graph.getLayerDigest(id) + if err != nil { + return fmt.Errorf("digest: %v", err) + } + var parentID digest.Digest + if i != len(imgs)-1 { + if img.Parent != imgs[i+1].id { // comparing that graph points to validated ID + return fmt.Errorf("parent: %v %v", img.Parent, imgs[i+1].id) + } + parentID = imgs[i+1].strongID + } else if img.Parent != "" { + return fmt.Errorf("unexpected parent: %v", img.Parent) + } + + v1Config, err := p.graph.getV1CompatibilityConfig(img.ID) + if err != nil { + return fmt.Errorf("v1Compatibility: %v %v", img.ID, err) + } + + json, err := image.MakeImageConfig(v1Config, layerID, parentID) + if err != nil { + return fmt.Errorf("make config: %v", err) + } + + if dgst, err := image.StrongID(json); err == nil && dgst == imgs[i].strongID { + logrus.Debugf("Validated %v as %v", dgst, id) + } else { + return fmt.Errorf("digest mismatch: %v %v, error: %v", dgst, imgs[i].strongID, err) + } + + // All clear + return nil +} + +func (p *v2Puller) tryNextID(imgs []contentAddressableDescriptor, i int, idMap map[string]struct{}) error { + nextID, _ := digest.FromBytes([]byte(imgs[i].id)) + imgs[i].id = nextID.Hex() + + if _, exists := idMap[imgs[i].id]; !exists { + p.graph.imageMutex.Lock(imgs[i].id) + defer p.graph.imageMutex.Unlock(imgs[i].id) + } + + if p.graph.Exists(imgs[i].id) { + if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil { + return fmt.Errorf("not using existing strongID permutation %s: %v", imgs[i].id, err) + } + } + return nil } diff --git a/vendor/github.com/docker/docker/graph/pull_v2_test.go b/vendor/github.com/docker/docker/graph/pull_v2_test.go new file mode 100644 index 00000000..f7ec1e28 --- /dev/null +++ b/vendor/github.com/docker/docker/graph/pull_v2_test.go @@ -0,0 +1,195 @@ +package graph + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. 
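On the function the test below exercises: fixManifestLayers splices duplicates out while iterating backwards, so removing index i never disturbs the indexes still to be visited. The same idiom on a plain slice:

    package main

    import "fmt"

    // dedupAdjacent removes runs of equal adjacent IDs, keeping one copy,
    // like the backwards loop over FSLayers/History in the patch.
    func dedupAdjacent(ids []string) []string {
        for i := len(ids) - 2; i >= 0; i-- {
            if ids[i] == ids[i+1] {
                ids = append(ids[:i], ids[i+1:]...)
            }
        }
        return ids
    }

    func main() {
        fmt.Println(dedupAdjacent([]string{"a", "a", "b", "c", "c", "c"})) // [a b c]
    }

A forward loop would either skip elements after each splice or need manual index bookkeeping; the backwards walk sidesteps both.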
+func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. 
+func TestFixManifestLayersBaseLayerParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly 
follow +// that (deduplicated) image in the image history. +func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + expectedDigest := "sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd" + expectedFSLayer0 := 
digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifyManifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } + + // Manifest with no signature + + expectedWholeFileDigest := "7ec3615a120efcdfc270e9c7ea4183330775a3e52a09e2efb194b9a7c18e5ff7" + + noSignatureManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/no_signature_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var noSignatureSignedManifest schema1.SignedManifest + noSignatureSignedManifest.Raw = noSignatureManifestBytes + err = json.Unmarshal(noSignatureManifestBytes, &noSignatureSignedManifest.Manifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&noSignatureSignedManifest, expectedWholeFileDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in no-signature manifest") + } +} diff --git a/vendor/github.com/docker/docker/graph/push.go b/vendor/github.com/docker/docker/graph/push.go index 9095ae6a..750eec50 100644 --- a/vendor/github.com/docker/docker/graph/push.go +++ b/vendor/github.com/docker/docker/graph/push.go @@ -5,6 +5,7 @@ import ( "io" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/registry" @@ -12,18 +13,22 @@ import ( // ImagePushConfig stores push configuration. type ImagePushConfig struct { - // MetaHeaders store meta data about the image (DockerHeaders with prefix X-Meta- in the request). 
+ // MetaHeaders store HTTP headers with metadata about the image + // (DockerHeaders with prefix X-Meta- in the request). MetaHeaders map[string][]string - // AuthConfig holds authentication information for authorizing with the registry. + // AuthConfig holds authentication credentials for authenticating with + // the registry. AuthConfig *cliconfig.AuthConfig - // Tag is the specific variant of the image to be pushed, this tag used when image is pushed. If no tag is provided, all tags will be pushed. + // Tag is the specific variant of the image to be pushed. + // If no tag is provided, all tags will be pushed. Tag string - // OutStream is the output writer for showing the status of the push operation. + // OutStream is the output writer for showing the status of the push + // operation. OutStream io.Writer } -// Pusher is an interface to define Push behavior. -type Pusher interface { +// pusher is an interface that abstracts pushing for different API versions. +type pusher interface { // Push tries to push the image configured at the creation of Pusher. // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. // @@ -31,18 +36,22 @@ type Pusher interface { Push() (fallback bool, err error) } -// NewPusher returns a new instance of an implementation conforming to Pusher interface. -func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig, sf *streamformatter.StreamFormatter) (Pusher, error) { +// newPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. +func (s *TagStore) newPusher(endpoint registry.APIEndpoint, localRepo repository, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig, sf *streamformatter.StreamFormatter) (pusher, error) { switch endpoint.Version { case registry.APIVersion2: return &v2Pusher{ - TagStore: s, - endpoint: endpoint, - localRepo: localRepo, - repoInfo: repoInfo, - config: imagePushConfig, - sf: sf, - layersSeen: make(map[string]bool), + TagStore: s, + endpoint: endpoint, + localRepo: localRepo, + repoInfo: repoInfo, + config: imagePushConfig, + sf: sf, + layersPushed: make(map[digest.Digest]bool), }, nil case registry.APIVersion1: return &v1Pusher{ @@ -57,10 +66,10 @@ func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) } -// FIXME: Allow to interrupt current push when new push of same image is done. - -// Push a image to the repo. +// Push initiates a push operation on the repository named localName. func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. 
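// A minimal, self-contained sketch of the dispatch pattern newPusher uses
// above: a switch on the endpoint's API version selects a v1 or v2
// implementation behind the unexported pusher interface. The apiVersion
// constants and the empty pusher structs below are illustrative stand-ins,
// not code from this patch.
package main

import "fmt"

type apiVersion int

const (
	apiVersion1 apiVersion = iota + 1
	apiVersion2
)

// pusher mirrors the interface in push.go: Push reports whether the caller
// should fall back to the next endpoint, along with any error.
type pusher interface {
	Push() (fallback bool, err error)
}

type v1Pusher struct{}

func (p *v1Pusher) Push() (bool, error) { return false, nil }

type v2Pusher struct{}

func (p *v2Pusher) Push() (bool, error) { return false, nil }

func newPusher(v apiVersion) (pusher, error) {
	switch v {
	case apiVersion2:
		return &v2Pusher{}, nil
	case apiVersion1:
		return &v1Pusher{}, nil
	}
	return nil, fmt.Errorf("unknown version %d", v)
}

func main() {
	p, err := newPusher(apiVersion2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", p) // *main.v2Pusher
}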
+ var sf = streamformatter.NewJSONStreamFormatter() // Resolve the Repository name from fqn to RepositoryInfo @@ -69,7 +78,7 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro return err } - endpoints, err := s.registryService.LookupEndpoints(repoInfo.CanonicalName) + endpoints, err := s.registryService.LookupPushEndpoints(repoInfo.CanonicalName) if err != nil { return err } @@ -91,7 +100,7 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro for _, endpoint := range endpoints { logrus.Debugf("Trying to push %s to %s %s", repoInfo.CanonicalName, endpoint.URL, endpoint.Version) - pusher, err := s.NewPusher(endpoint, localRepo, repoInfo, imagePushConfig, sf) + pusher, err := s.newPusher(endpoint, localRepo, repoInfo, imagePushConfig, sf) if err != nil { lastErr = err continue diff --git a/vendor/github.com/docker/docker/graph/push_v1.go b/vendor/github.com/docker/docker/graph/push_v1.go index d473040f..01ad73ed 100644 --- a/vendor/github.com/docker/docker/graph/push_v1.go +++ b/vendor/github.com/docker/docker/graph/push_v1.go @@ -8,6 +8,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/image" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" @@ -19,7 +20,7 @@ import ( type v1Pusher struct { *TagStore endpoint registry.APIEndpoint - localRepo Repository + localRepo repository repoInfo *registry.RepositoryInfo config *ImagePushConfig sf *streamformatter.StreamFormatter @@ -127,7 +128,7 @@ func (s *TagStore) createImageIndex(images []string, tags map[string][]string) [ continue } // If the image does not have a tag it still needs to be sent to the - // registry with an empty tag so that it is accociated with the repository + // registry with an empty tag so that it is associated with the repository imageIndex = append(imageIndex, ®istry.ImgData{ ID: id, Tag: "", @@ -137,9 +138,9 @@ func (s *TagStore) createImageIndex(images []string, tags map[string][]string) [ } type imagePushData struct { - id string - endpoint string - tokens []string + id string + compatibilityID string + endpoint string } // lookupImageOnEndpoint checks the specified endpoint to see if an image exists @@ -147,7 +148,7 @@ type imagePushData struct { func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, images chan imagePushData, imagesToPush chan string) { defer wg.Done() for image := range images { - if err := p.session.LookupRemoteImage(image.id, image.endpoint); err != nil { + if err := p.session.LookupRemoteImage(image.compatibilityID, image.endpoint); err != nil { logrus.Errorf("Error in LookupRemoteImage: %s", err) imagesToPush <- image.id continue @@ -181,10 +182,14 @@ func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags pushes <- shouldPush }() for _, id := range imageIDs { + compatibilityID, err := p.getV1ID(id) + if err != nil { + return err + } imageData <- imagePushData{ - id: id, - endpoint: endpoint, - tokens: repo.Tokens, + id: id, + compatibilityID: compatibilityID, + endpoint: endpoint, } } // close the channel to notify the workers that there will be no more images to check. @@ -197,14 +202,18 @@ func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags // is very important that is why we are still iterating over the ordered list of imageIDs. 
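// lookupImageOnEndpoint above runs as a pool of goroutines fed from a
// channel: closing the channel signals that no more images are coming, and
// the WaitGroup tells the producer when every worker has drained. A
// self-contained sketch of that fan-out shape; checkRemote is a hypothetical
// stand-in for the session.LookupRemoteImage call.
package main

import (
	"fmt"
	"sync"
)

func checkRemote(id string) bool {
	// Pretend only "img-2" already exists on the registry.
	return id == "img-2"
}

func main() {
	ids := []string{"img-1", "img-2", "img-3"}

	in := make(chan string)
	toPush := make(chan string, len(ids))

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range in {
				if !checkRemote(id) {
					toPush <- id
				}
			}
		}()
	}

	for _, id := range ids {
		in <- id
	}
	close(in) // no more images to check
	wg.Wait()
	close(toPush)

	for id := range toPush {
		fmt.Println("would push", id)
	}
}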
for _, id := range imageIDs { if _, push := shouldPush[id]; push { - if _, err := p.pushImage(id, endpoint, repo.Tokens); err != nil { + if _, err := p.pushImage(id, endpoint); err != nil { // FIXME: Continue on error? return err } } for _, tag := range tags[id] { p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+p.repoInfo.RemoteName+"/tags/"+tag)) - if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, id, tag, endpoint); err != nil { + compatibilityID, err := p.getV1ID(id) + if err != nil { + return err + } + if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, compatibilityID, tag, endpoint); err != nil { return err } } @@ -214,7 +223,6 @@ func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags // pushRepository pushes layers that do not already exist on the registry. func (p *v1Pusher) pushRepository(tag string) error { - logrus.Debugf("Local repo: %s", p.localRepo) p.out = ioutils.NewWriteFlusher(p.config.OutStream) imgList, tags, err := p.getImageList(tag) @@ -227,10 +235,16 @@ func (p *v1Pusher) pushRepository(tag string) error { logrus.Debugf("Preparing to push %s with the following images and tags", p.localRepo) for _, data := range imageIndex { logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + + // convert IDs to compatibilityIDs, imageIndex only used in registry calls + data.ID, err = p.getV1ID(data.ID) + if err != nil { + return err + } } - if _, err := p.poolAdd("push", p.repoInfo.LocalName); err != nil { - return err + if _, found := p.poolAdd("push", p.repoInfo.LocalName); found { + return fmt.Errorf("push or pull %s is already in progress", p.repoInfo.LocalName) } defer p.poolRemove("push", p.repoInfo.LocalName) @@ -255,43 +269,50 @@ func (p *v1Pusher) pushRepository(tag string) error { return err } -func (p *v1Pusher) pushImage(imgID, ep string, token []string) (checksum string, err error) { - jsonRaw, err := p.graph.RawJSON(imgID) +func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) { + jsonRaw, err := p.getV1Config(imgID) if err != nil { return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) } p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil)) + compatibilityID, err := p.getV1ID(imgID) + if err != nil { + return "", err + } + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() imgData := ®istry.ImgData{ - ID: imgID, + ID: compatibilityID, } // Send the json if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { if err == registry.ErrAlreadyExists { - p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) + p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image already pushed, skipping", nil)) return "", nil } return "", err } - layerData, err := p.graph.TempLayerArchive(imgID, p.sf, p.out) + layerData, err := p.graph.tempLayerArchive(imgID, p.sf, p.out) if err != nil { return "", fmt.Errorf("Failed to generate layer archive: %s", err) } defer os.RemoveAll(layerData.Name()) // Send the layer - logrus.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) + logrus.Debugf("rendered layer for %s of [%d] size", imgID, layerData.Size) checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID, progressreader.New(progressreader.Config{ In: layerData, Out: p.out, Formatter: p.sf, - Size: int(layerData.Size), + Size: 
layerData.Size, NewLines: false, - ID: stringid.TruncateID(imgData.ID), + ID: stringid.TruncateID(imgID), Action: "Pushing", }), ep, jsonRaw) if err != nil { @@ -304,6 +325,30 @@ func (p *v1Pusher) pushImage(imgID, ep string, token []string) (checksum string, return "", err } - p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image successfully pushed", nil)) + p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image successfully pushed", nil)) return imgData.Checksum, nil } + +// getV1ID returns the compatibilityID for the ID in the graph. compatibilityID +// is read from the v1Compatibility config file on disk. +func (p *v1Pusher) getV1ID(id string) (string, error) { + jsonData, err := p.getV1Config(id) + if err != nil { + return "", err + } + img, err := image.NewImgJSON(jsonData) + if err != nil { + return "", err + } + return img.ID, nil +} + +// getV1Config returns the v1Compatibility config for the image in the graph. If +// there is no v1Compatibility file on disk for the image, one is generated. +func (p *v1Pusher) getV1Config(id string) ([]byte, error) { + jsonData, err := p.graph.generateV1CompatibilityChain(id) + if err != nil { + return nil, err + } + return jsonData, nil +} diff --git a/vendor/github.com/docker/docker/graph/push_v2.go b/vendor/github.com/docker/docker/graph/push_v2.go index 869347df..8586fa91 100644 --- a/vendor/github.com/docker/docker/graph/push_v2.go +++ b/vendor/github.com/docker/docker/graph/push_v2.go @@ -1,14 +1,17 @@ package graph import ( + "bufio" + "compress/gzip" "fmt" + "io" "io/ioutil" - "os" "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/docker/image" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" @@ -19,23 +22,25 @@ import ( "golang.org/x/net/context" ) +const compressionBufSize = 32768 + type v2Pusher struct { *TagStore endpoint registry.APIEndpoint - localRepo Repository + localRepo repository repoInfo *registry.RepositoryInfo config *ImagePushConfig sf *streamformatter.StreamFormatter repo distribution.Repository - // layersSeen is the set of layers known to exist on the remote side. + // layersPushed is the set of layers known to exist on the remote side. // This avoids redundant queries when pushing multiple tags that // involve the same layers.
- layersSeen map[string]bool + layersPushed map[digest.Digest]bool } func (p *v2Pusher) Push() (fallback bool, err error) { - p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig) + p.repo, err = newV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") if err != nil { logrus.Debugf("Error getting v2 registry: %v", err) return true, err @@ -62,8 +67,8 @@ func (p *v2Pusher) getImageTags(askedTag string) ([]string, error) { func (p *v2Pusher) pushV2Repository(tag string) error { localName := p.repoInfo.LocalName - if _, err := p.poolAdd("push", localName); err != nil { - return err + if _, found := p.poolAdd("push", localName); found { + return fmt.Errorf("push or pull %s is already in progress", localName) } defer p.poolRemove("push", localName) @@ -92,20 +97,22 @@ func (p *v2Pusher) pushV2Tag(tag string) error { return fmt.Errorf("tag does not exist: %s", tag) } + layersSeen := make(map[string]bool) + layer, err := p.graph.Get(layerID) if err != nil { return err } - m := &manifest.Manifest{ + m := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: p.repo.Name(), Tag: tag, Architecture: layer.Architecture, - FSLayers: []manifest.FSLayer{}, - History: []manifest.History{}, + FSLayers: []schema1.FSLayer{}, + History: []schema1.History{}, } var metadata runconfig.Config @@ -120,7 +127,11 @@ func (p *v2Pusher) pushV2Tag(tag string) error { return err } - if p.layersSeen[layer.ID] { + // break early if layer has already been seen in this image, + // this prevents infinite loops on layers which loopback, this + // cannot be prevented since layer IDs are not merkle hashes + // TODO(dmcgowan): throw error if no valid use case is found + if layersSeen[layer.ID] { break } @@ -132,16 +143,18 @@ func (p *v2Pusher) pushV2Tag(tag string) error { } } - jsonData, err := p.graph.RawJSON(layer.ID) - if err != nil { - return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err) - } - var exists bool - dgst, err := p.graph.GetDigest(layer.ID) + dgst, err := p.graph.getLayerDigestWithLock(layer.ID) switch err { case nil: - _, err := p.repo.Blobs(nil).Stat(nil, dgst) + if p.layersPushed[dgst] { + exists = true + // break out of switch, it is already known that + // the push is not needed and therefore doing a + // stat is unnecessary + break + } + _, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst) switch err { case nil: exists = true @@ -152,7 +165,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error { out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil)) return err } - case ErrDigestNotSet: + case errDigestNotSet: // nop case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported: return fmt.Errorf("error getting image checksum: %v", err) @@ -161,25 +174,34 @@ func (p *v2Pusher) pushV2Tag(tag string) error { // if digest was empty or not saved, or if blob does not exist on the remote repository, // then fetch it. 
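// The layersPushed map consulted above is a per-push memo: a digest already
// confirmed in this session skips the remote Blobs().Stat() round trip. A
// minimal sketch of that check-the-cache-first shape; statRemote is a
// hypothetical stand-in for the registry stat call.
package main

import "fmt"

func statRemote(dgst string) bool {
	fmt.Println("HEAD blob", dgst) // simulated network round trip
	return false
}

type pushState struct {
	layersPushed map[string]bool
}

func (s *pushState) blobExists(dgst string) bool {
	if s.layersPushed[dgst] {
		return true // already confirmed this push; skip the network call
	}
	if statRemote(dgst) {
		s.layersPushed[dgst] = true
		return true
	}
	return false
}

func main() {
	s := &pushState{layersPushed: map[string]bool{}}
	fmt.Println(s.blobExists("sha256:aaa")) // performs the round trip
	s.layersPushed["sha256:bbb"] = true
	fmt.Println(s.blobExists("sha256:bbb")) // no round trip
}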
if !exists { - if pushDigest, err := p.pushV2Image(p.repo.Blobs(nil), layer); err != nil { + var pushDigest digest.Digest + if pushDigest, err = p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil { return err - } else if pushDigest != dgst { + } + if dgst == "" { // Cache new checksum - if err := p.graph.SetDigest(layer.ID, pushDigest); err != nil { + if err := p.graph.setLayerDigestWithLock(layer.ID, pushDigest); err != nil { return err } - dgst = pushDigest } + dgst = pushDigest } - m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst}) - m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)}) + // read v1Compatibility config, generate new if needed + jsonData, err := p.graph.generateV1CompatibilityChain(layer.ID) + if err != nil { + return err + } - p.layersSeen[layer.ID] = true + m.FSLayers = append(m.FSLayers, schema1.FSLayer{BlobSum: dgst}) + m.History = append(m.History, schema1.History{V1Compatibility: string(jsonData)}) + + layersSeen[layer.ID] = true + p.layersPushed[dgst] = true } logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID()) - signed, err := manifest.Sign(m, p.trustKey) + signed, err := schema1.Sign(m, p.trustKey) if err != nil { return err } @@ -202,62 +224,78 @@ func (p *v2Pusher) pushV2Tag(tag string) error { func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) { out := p.config.OutStream - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil)) + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil)) image, err := p.graph.Get(img.ID) if err != nil { return "", err } - arch, err := p.graph.TarLayer(image) - if err != nil { - return "", err - } - - tf, err := p.graph.newTempFile() - if err != nil { - return "", err - } - defer func() { - tf.Close() - os.Remove(tf.Name()) - }() - - size, dgst, err := bufferToFile(tf, arch) + arch, err := p.graph.tarLayer(image) if err != nil { return "", err } + defer arch.Close() // Send the layer - logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size) - layerUpload, err := bs.Create(nil) + layerUpload, err := bs.Create(context.Background()) if err != nil { return "", err } defer layerUpload.Close() reader := progressreader.New(progressreader.Config{ - In: ioutil.NopCloser(tf), + In: ioutil.NopCloser(arch), // we'll take care of close here. Out: out, Formatter: p.sf, - Size: int(size), - NewLines: false, - ID: stringid.TruncateID(img.ID), - Action: "Pushing", + + // TODO(stevvooe): This may cause a size reporting error. Try to get + // this from tar-split or elsewhere. The main issue here is that we + // don't want to buffer to disk *just* to calculate the size. + Size: img.Size, + + NewLines: false, + ID: stringid.TruncateID(img.ID), + Action: "Pushing", }) - n, err := layerUpload.ReadFrom(reader) + + digester := digest.Canonical.New() + // HACK: The MultiWriter doesn't write directly to layerUpload because + // we must make sure the ReadFrom is used, not Write. Using Write would + // send a PATCH request for every Write call. + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. 
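// The plumbing around this point compresses and digests the layer in a
// single streaming pass: compressed bytes are teed into both the hash and an
// io.Pipe, and the consumer drains the pipe with ReadFrom so the upload
// stays one streaming request. A self-contained sketch using only the
// standard library; sha256 stands in for digest.Canonical, bytes.Buffer for
// layerUpload, and the bufio layer is omitted for brevity.
package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader(strings.Repeat("layer data ", 1000))

	pr, pw := io.Pipe()
	h := sha256.New()

	go func() {
		// Hash exactly the bytes that go over the wire: compressed output
		// is written to the pipe and the hash at the same time.
		gz := gzip.NewWriter(io.MultiWriter(pw, h))
		_, err := io.Copy(gz, src)
		if err == nil {
			err = gz.Close()
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
	}()

	var upload bytes.Buffer // stands in for layerUpload
	n, err := upload.ReadFrom(pr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, digest sha256:%x\n", n, h.Sum(nil))
}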
+ bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, reader) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + }() + + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil)) + nn, err := layerUpload.ReadFrom(pipeReader) + pipeReader.Close() if err != nil { return "", err } - if n != size { - return "", fmt.Errorf("short upload: only wrote %d of %d", n, size) - } - desc := distribution.Descriptor{Digest: dgst} - if _, err := layerUpload.Commit(nil, desc); err != nil { + dgst := digester.Digest() + if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil { return "", err } - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil)) + logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn) + out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil)) return dgst, nil } diff --git a/vendor/github.com/docker/docker/graph/registry.go b/vendor/github.com/docker/docker/graph/registry.go index 974235b2..a9a4ad18 100644 --- a/vendor/github.com/docker/docker/graph/registry.go +++ b/vendor/github.com/docker/docker/graph/registry.go @@ -7,10 +7,12 @@ import ( "net/url" "time" + "strings" + "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" @@ -27,8 +29,10 @@ func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { return dcs.auth.Username, dcs.auth.Password } -// NewV2Repository creates a v2 only repository. -func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig) (distribution.Repository, error) { +// newV2Repository returns a repository (v2 only). It creates a HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func newV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig, actions ...string) (distribution.Repository, error) { ctx := context.Background() repoName := repoInfo.CanonicalName @@ -55,9 +59,9 @@ func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEnd authTransport := transport.NewTransport(base, modifiers...) pingClient := &http.Client{ Transport: authTransport, - Timeout: 5 * time.Second, + Timeout: 15 * time.Second, } - endpointStr := endpoint.URL + "/v2/" + endpointStr := strings.TrimRight(endpoint.URL, "/") + "/v2/" req, err := http.NewRequest("GET", endpointStr, nil) if err != nil { return nil, err @@ -89,7 +93,7 @@ func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEnd } creds := dumbCredentialStore{auth: authConfig} - tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "push", "pull") + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, actions...) 
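// The strings.TrimRight change above normalizes endpoint URLs that already
// end in a slash, which would otherwise produce ".../v2//". A quick
// illustration; the URLs are hypothetical.
package main

import (
	"fmt"
	"strings"
)

func v2PingURL(endpoint string) string {
	return strings.TrimRight(endpoint, "/") + "/v2/"
}

func main() {
	fmt.Println(v2PingURL("https://registry.example.com"))  // https://registry.example.com/v2/
	fmt.Println(v2PingURL("https://registry.example.com/")) // https://registry.example.com/v2/
}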
basicHandler := auth.NewBasicHandler(creds) modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) tr := transport.NewTransport(base, modifiers...) @@ -97,11 +101,12 @@ func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEnd return client.NewRepository(ctx, repoName, endpoint.URL, tr) } -func digestFromManifest(m *manifest.SignedManifest, localName string) (digest.Digest, int, error) { +func digestFromManifest(m *schema1.SignedManifest, localName string) (digest.Digest, int, error) { payload, err := m.Payload() if err != nil { - logrus.Debugf("could not retrieve manifest payload: %v", err) - return "", 0, err + // If this failed, the signatures section was corrupted + // or missing. Treat the entire manifest as the payload. + payload = m.Raw } manifestDigest, err := digest.FromBytes(payload) if err != nil { diff --git a/vendor/github.com/docker/docker/graph/service.go b/vendor/github.com/docker/docker/graph/service.go index 1ab7485e..11fe9266 100644 --- a/vendor/github.com/docker/docker/graph/service.go +++ b/vendor/github.com/docker/docker/graph/service.go @@ -8,31 +8,39 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" + "github.com/docker/docker/utils" ) -func (s *TagStore) lookupRaw(name string) ([]byte, error) { - image, err := s.LookupImage(name) - if err != nil || image == nil { - return nil, fmt.Errorf("No such image %s", name) - } - - imageInspectRaw, err := s.graph.RawJSON(image.ID) - if err != nil { - return nil, err - } - - return imageInspectRaw, nil -} - -// Lookup return an image encoded in JSON +// Lookup looks up an image by name in a TagStore and returns it as an +// ImageInspect structure. func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) { image, err := s.LookupImage(name) if err != nil || image == nil { return nil, fmt.Errorf("No such image: %s", name) } + var repoTags = make([]string, 0) + var repoDigests = make([]string, 0) + + s.Lock() + for repoName, repository := range s.Repositories { + for ref, id := range repository { + if id == image.ID { + imgRef := utils.ImageReference(repoName, ref) + if utils.DigestReference(ref) { + repoDigests = append(repoDigests, imgRef) + } else { + repoTags = append(repoTags, imgRef) + } + } + } + } + s.Unlock() + imageInspect := &types.ImageInspect{ - Id: image.ID, + ID: image.ID, + RepoTags: repoTags, + RepoDigests: repoDigests, Parent: image.Parent, Comment: image.Comment, Created: image.Created.Format(time.RFC3339Nano), @@ -44,7 +52,7 @@ func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) { Architecture: image.Architecture, Os: image.OS, Size: image.Size, - VirtualSize: s.graph.GetParentsSize(image, 0) + image.Size, + VirtualSize: s.graph.getParentsSize(image) + image.Size, } imageInspect.GraphDriver.Name = s.graph.driver.String() @@ -57,13 +65,13 @@ func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) { return imageInspect, nil } -// ImageTarLayer return the tarLayer of the image -func (s *TagStore) ImageTarLayer(name string, dest io.Writer) error { +// imageTarLayer return the tarLayer of the image +func (s *TagStore) imageTarLayer(name string, dest io.Writer) error { if image, err := s.LookupImage(name); err == nil && image != nil { // On Windows, the base layer cannot be exported if runtime.GOOS != "windows" || image.Parent != "" { - fs, err := s.graph.TarLayer(image) + fs, err := s.graph.tarLayer(image) if err != nil { return err } diff --git 
a/vendor/github.com/docker/docker/graph/tags.go b/vendor/github.com/docker/docker/graph/tags.go index 7ea782fe..d9423373 100644 --- a/vendor/github.com/docker/docker/graph/tags.go +++ b/vendor/github.com/docker/docker/graph/tags.go @@ -16,64 +16,53 @@ import ( "github.com/docker/docker/daemon/events" "github.com/docker/docker/graph/tags" "github.com/docker/docker/image" + "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/registry" - "github.com/docker/docker/trust" "github.com/docker/docker/utils" "github.com/docker/libtrust" ) -// DefaultTag defines the default tag used when performing images related actions and no tag string is specified -const DefaultTag = "latest" +// ErrNameIsNotExist returned when there is no image with requested name. +var ErrNameIsNotExist = errors.New("image with specified name does not exist") -// TagStore contains information to push and pull to the repo. +// TagStore manages repositories. It encompasses the Graph used for versioned +// storage, as well as various services involved in pushing and pulling +// repositories. type TagStore struct { - path string - graph *Graph - Repositories map[string]Repository + path string + graph *Graph + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository trustKey libtrust.PrivateKey sync.Mutex // FIXME: move push/pull-related fields // to a helper type - pullingPool map[string]chan struct{} - pushingPool map[string]chan struct{} + pullingPool map[string]*broadcaster.Buffered + pushingPool map[string]*broadcaster.Buffered registryService *registry.Service eventsService *events.Events - trustService *trust.TrustStore } -// Repository maps image id to image tag. -type Repository map[string]string +// repository maps tags to image IDs. +type repository map[string]string -// Update updates repository mapping with content of repository 'u'. -func (r Repository) Update(u Repository) { - for k, v := range u { - r[k] = v - } -} - -// Contains returns true if the contents of u Repository, are wholly contained in r Repository. -func (r Repository) Contains(u Repository) bool { - for k, v := range u { - // if u's key is not present in r OR u's key is present, but not the same value - if rv, ok := r[k]; !ok || (ok && rv != v) { - return false - } - } - return true -} - -// TagStoreConfig holds tag store configuration. +// TagStoreConfig provides parameters for a new TagStore. type TagStoreConfig struct { - Graph *Graph - Key libtrust.PrivateKey + // Graph is the versioned image store + Graph *Graph + // Key is the private key to use for signing manifests. + Key libtrust.PrivateKey + // Registry is the registry service to use for TLS configuration and + // endpoint lookup. Registry *registry.Service - Events *events.Events - Trust *trust.TrustStore + // Events is the events service to use for logging. + Events *events.Events } -// NewTagStore creates a tag store to specified path. +// NewTagStore creates a new TagStore at specified path, using the parameters +// and services provided in cfg. 
func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) { abspath, err := filepath.Abs(path) if err != nil { @@ -84,12 +73,11 @@ func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) { path: abspath, graph: cfg.Graph, trustKey: cfg.Key, - Repositories: make(map[string]Repository), - pullingPool: make(map[string]chan struct{}), - pushingPool: make(map[string]chan struct{}), + Repositories: make(map[string]repository), + pullingPool: make(map[string]*broadcaster.Buffered), + pushingPool: make(map[string]*broadcaster.Buffered), registryService: cfg.Registry, eventsService: cfg.Events, - trustService: cfg.Trust, } // Load the json file if it exists, otherwise create it. if err := store.reload(); os.IsNotExist(err) { @@ -126,26 +114,26 @@ func (store *TagStore) reload() error { return nil } -// LookupImage returns the image from the store. +// LookupImage returns pointer to an Image struct corresponding to the given +// name. The name can include an optional tag; otherwise the default tag will +// be used. func (store *TagStore) LookupImage(name string) (*image.Image, error) { - // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else - // (so we can pass all errors here) repoName, ref := parsers.ParseRepositoryTag(name) if ref == "" { - ref = DefaultTag + ref = tags.DefaultTag } var ( err error img *image.Image ) - img, err = store.GetImage(repoName, ref) + img, err = store.getImage(repoName, ref) if err != nil { return nil, err } if img != nil { - return img, err + return img, nil } // name must be an image ID. @@ -158,8 +146,28 @@ func (store *TagStore) LookupImage(name string) (*image.Image, error) { return img, nil } -// ByID returns a reverse-lookup table of all the names which refer to each image. -// Eg. {"43b5f19b10584": {"base:latest", "base:v1"}} +// GetID returns ID for image name. +func (store *TagStore) GetID(name string) (string, error) { + repoName, ref := parsers.ParseRepositoryTag(name) + if ref == "" { + ref = tags.DefaultTag + } + store.Lock() + defer store.Unlock() + repoName = registry.NormalizeLocalName(repoName) + repo, ok := store.Repositories[repoName] + if !ok { + return "", ErrNameIsNotExist + } + id, ok := repo[ref] + if !ok { + return "", ErrNameIsNotExist + } + return id, nil +} + +// ByID returns a reverse-lookup table of all the names which refer to each +// image - e.g. {"43b5f19b10584": {"base:latest", "base:v1"}} func (store *TagStore) ByID() map[string][]string { store.Lock() defer store.Unlock() @@ -178,36 +186,15 @@ func (store *TagStore) ByID() map[string][]string { return byID } -// ImageName returns name of the image. -func (store *TagStore) ImageName(id string) string { - if names, exists := store.ByID()[id]; exists && len(names) > 0 { - return names[0] - } - return stringid.TruncateID(id) +// HasReferences returns whether or not the given image is referenced in one or +// more repositories. +func (store *TagStore) HasReferences(img *image.Image) bool { + return len(store.ByID()[img.ID]) > 0 } -// DeleteAll removes images identified by a specific id from the store. 
-func (store *TagStore) DeleteAll(id string) error { - names, exists := store.ByID()[id] - if !exists || len(names) == 0 { - return nil - } - for _, name := range names { - if strings.Contains(name, ":") { - nameParts := strings.Split(name, ":") - if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { - return err - } - } else { - if _, err := store.Delete(name, ""); err != nil { - return err - } - } - } - return nil -} - -// Delete removes a repo identified by a given name from the store +// Delete deletes a repository or a specific tag. If ref is empty, the entire +// repository named repoName will be deleted; otherwise only the tag named by +// ref will be deleted. func (store *TagStore) Delete(repoName, ref string) (bool, error) { store.Lock() defer store.Unlock() @@ -240,14 +227,16 @@ func (store *TagStore) Delete(repoName, ref string) (bool, error) { return deleted, store.save() } -// Tag adds a new tag to an existing image. +// Tag creates a tag in the repository reponame, pointing to the image named +// imageName. If force is true, an existing tag with the same name may be +// overwritten. func (store *TagStore) Tag(repoName, tag, imageName string, force bool) error { - return store.SetLoad(repoName, tag, imageName, force, nil) + return store.setLoad(repoName, tag, imageName, force, nil) } -// SetLoad stores the image to the store. +// setLoad stores the image to the store. // If the imageName is already in the repo then a '-f' flag should be used to replace existing image. -func (store *TagStore) SetLoad(repoName, tag, imageName string, force bool, out io.Writer) error { +func (store *TagStore) setLoad(repoName, tag, imageName string, force bool, out io.Writer) error { img, err := store.LookupImage(imageName) store.Lock() defer store.Unlock() @@ -255,32 +244,25 @@ func (store *TagStore) SetLoad(repoName, tag, imageName string, force bool, out return err } if tag == "" { - tag = tags.DEFAULTTAG + tag = tags.DefaultTag } if err := validateRepoName(repoName); err != nil { return err } if err := tags.ValidateTagName(tag); err != nil { - if _, formatError := err.(tags.ErrTagInvalidFormat); !formatError { - return err - } - if _, dErr := digest.ParseDigest(tag); dErr != nil { - // Still return the tag validation error. - // It's more likely to be a user generated issue. - return err - } + return err } if err := store.reload(); err != nil { return err } - var repo Repository + var repo repository repoName = registry.NormalizeLocalName(repoName) if r, exists := store.Repositories[repoName]; exists { repo = r if old, exists := store.Repositories[repoName][tag]; exists { if !force { - return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old) + return fmt.Errorf("Conflict: Tag %s:%s is already set to image %s, if you want to replace it, please use -f option", repoName, tag, old[:12]) } if old != img.ID && out != nil { @@ -297,8 +279,8 @@ func (store *TagStore) SetLoad(repoName, tag, imageName string, force bool, out return store.save() } -// SetDigest creates a digest reference to an image ID. -func (store *TagStore) SetDigest(repoName, digest, imageName string) error { +// setDigest creates a digest reference to an image ID. 
+func (store *TagStore) setDigest(repoName, digest, imageName string) error { img, err := store.LookupImage(imageName) if err != nil { return err @@ -321,7 +303,7 @@ func (store *TagStore) SetDigest(repoName, digest, imageName string) error { repoName = registry.NormalizeLocalName(repoName) repoRefs, exists := store.Repositories[repoName] if !exists { - repoRefs = Repository{} + repoRefs = repository{} store.Repositories[repoName] = repoRefs } else if oldID, exists := repoRefs[digest]; exists && oldID != img.ID { return fmt.Errorf("Conflict: Digest %s is already set to image %s", digest, oldID) @@ -331,8 +313,8 @@ func (store *TagStore) SetDigest(repoName, digest, imageName string) error { return store.save() } -// Get returns a repo from the store. -func (store *TagStore) Get(repoName string) (Repository, error) { +// get returns the repository tag/image map for a given repository. +func (store *TagStore) get(repoName string) (repository, error) { store.Lock() defer store.Unlock() if err := store.reload(); err != nil { @@ -345,9 +327,10 @@ func (store *TagStore) Get(repoName string) (Repository, error) { return nil, nil } -// GetImage returns an image from a given repo from the store. -func (store *TagStore) GetImage(repoName, refOrID string) (*image.Image, error) { - repo, err := store.Get(repoName) +// getImage returns a pointer to an Image structure describing the image +// referred to by refOrID inside repository repoName. +func (store *TagStore) getImage(repoName, refOrID string) (*image.Image, error) { + repo, err := store.get(repoName) if err != nil { return nil, err @@ -375,21 +358,6 @@ func (store *TagStore) GetImage(repoName, refOrID string) (*image.Image, error) return nil, nil } -// GetRepoRefs returns list of repos. -func (store *TagStore) GetRepoRefs() map[string][]string { - store.Lock() - reporefs := make(map[string][]string) - - for name, repository := range store.Repositories { - for tag, id := range repository { - shortID := stringid.TruncateID(id) - reporefs[shortID] = append(reporefs[shortID], utils.ImageReference(name, tag)) - } - } - store.Unlock() - return reporefs -} - // validateRepoName validates the name of a repository. func validateRepoName(name string) error { if name == "" { @@ -411,41 +379,46 @@ func validateDigest(dgst string) error { return nil } -func (store *TagStore) poolAdd(kind, key string) (chan struct{}, error) { +// poolAdd checks if a push or pull is already running, and returns +// (broadcaster, true) if a running operation is found. Otherwise, it creates a +// new one and returns (broadcaster, false). 
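The poolAdd rewrite described above swaps bare channels for *broadcaster.Buffered so that a concurrent pull of the same image can attach to the operation already in flight instead of failing. A self-contained sketch of the dedup pattern, with a simplified stand-in for the broadcaster type (the vendored type carries more machinery, such as buffered output replay):

package main

import (
	"fmt"
	"sync"
)

// operation is a simplified stand-in for broadcaster.Buffered: something
// concurrent callers can share while one goroutine does the work.
type operation struct{ done chan struct{} }

type pool struct {
	sync.Mutex
	pulling map[string]*operation
}

// add returns (op, true) if a pull for key is already running; otherwise
// it registers a new operation and returns (op, false) -- the same
// contract as poolAdd above.
func (p *pool) add(key string) (*operation, bool) {
	p.Lock()
	defer p.Unlock()
	if op, exists := p.pulling[key]; exists {
		return op, true
	}
	op := &operation{done: make(chan struct{})}
	p.pulling[key] = op
	return op, false
}

// remove closes the shared operation and forgets it, like poolRemove.
func (p *pool) remove(key string) {
	p.Lock()
	defer p.Unlock()
	if op, exists := p.pulling[key]; exists {
		close(op.done)
		delete(p.pulling, key)
	}
}

func main() {
	p := &pool{pulling: make(map[string]*operation)}
	op, found := p.add("busybox:latest")
	fmt.Println(found) // false: this caller owns the pull
	if _, found := p.add("busybox:latest"); found {
		fmt.Println("second caller attaches to the running pull")
	}
	p.remove("busybox:latest")
	<-op.done // waiters unblock once the owner finishes
}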
+func (store *TagStore) poolAdd(kind, key string) (*broadcaster.Buffered, bool) { store.Lock() defer store.Unlock() - if c, exists := store.pullingPool[key]; exists { - return c, fmt.Errorf("pull %s is already in progress", key) + if p, exists := store.pullingPool[key]; exists { + return p, true } - if c, exists := store.pushingPool[key]; exists { - return c, fmt.Errorf("push %s is already in progress", key) + if p, exists := store.pushingPool[key]; exists { + return p, true } - c := make(chan struct{}) + broadcaster := broadcaster.NewBuffered() + switch kind { case "pull": - store.pullingPool[key] = c + store.pullingPool[key] = broadcaster case "push": - store.pushingPool[key] = c + store.pushingPool[key] = broadcaster default: - return nil, fmt.Errorf("Unknown pool type") + panic("Unknown pool type") } - return c, nil + + return broadcaster, false } -func (store *TagStore) poolRemove(kind, key string) error { +func (store *TagStore) poolRemoveWithError(kind, key string, broadcasterResult error) error { store.Lock() defer store.Unlock() switch kind { case "pull": - if c, exists := store.pullingPool[key]; exists { - close(c) + if broadcaster, exists := store.pullingPool[key]; exists { + broadcaster.CloseWithError(broadcasterResult) delete(store.pullingPool, key) } case "push": - if c, exists := store.pushingPool[key]; exists { - close(c) + if broadcaster, exists := store.pushingPool[key]; exists { + broadcaster.CloseWithError(broadcasterResult) delete(store.pushingPool, key) } default: @@ -453,3 +426,7 @@ func (store *TagStore) poolRemove(kind, key string) error { } return nil } + +func (store *TagStore) poolRemove(kind, key string) error { + return store.poolRemoveWithError(kind, key, nil) +} diff --git a/vendor/github.com/docker/docker/graph/tags/tags.go b/vendor/github.com/docker/docker/graph/tags/tags.go index cbd0f6bc..4c9399b4 100644 --- a/vendor/github.com/docker/docker/graph/tags/tags.go +++ b/vendor/github.com/docker/docker/graph/tags/tags.go @@ -2,12 +2,17 @@ package tags import ( "fmt" + "regexp" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" ) -const DEFAULTTAG = "latest" +// DefaultTag defines the default tag used when performing image-related actions and no tag string is specified +const DefaultTag = "latest" +var anchoredTagRegexp = regexp.MustCompile(`^` + reference.TagRegexp.String() + `$`) + +// ErrTagInvalidFormat is returned if tag is invalid. type ErrTagInvalidFormat struct { name string } @@ -16,13 +21,15 @@ func (e ErrTagInvalidFormat) Error() string { return fmt.Sprintf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed ('.' and '-' are NOT allowed in the initial), minimum 1, maximum 128 in length", e.name) } -// ValidateTagName validates the name of a tag +// ValidateTagName validates the name of a tag. +// It returns an error if the given name is an empty string.
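The tags.go hunk above anchors distribution's reference.TagRegexp with ^...$ before matching. A standalone sketch of why the anchoring matters; the pattern literal below mirrors reference.TagRegexp as an assumption, it is not the vendored definition:

package main

import (
	"fmt"
	"regexp"
)

// tagPattern mirrors reference.TagRegexp from docker/distribution:
// a word character first, then up to 127 word characters, dots, or dashes.
var tagPattern = `[\w][\w.-]{0,127}`

// Anchoring matters: without ^...$ MatchString would accept any string
// that merely contains a valid tag somewhere inside it.
var anchoredTag = regexp.MustCompile(`^` + tagPattern + `$`)

func main() {
	for _, tag := range []string{"latest", "v1.0", "-9", ".foo"} {
		fmt.Printf("%-6s valid=%v\n", tag, anchoredTag.MatchString(tag))
	}
	// latest and v1.0 are valid; "-9" and ".foo" are rejected because
	// '.' and '-' may not lead, matching the unit tests below.
}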
+// If name is not valid, it returns ErrTagInvalidFormat func ValidateTagName(name string) error { if name == "" { return fmt.Errorf("tag name can't be empty") } - if !v2.TagNameAnchoredRegexp.MatchString(name) { + if !anchoredTagRegexp.MatchString(name) { return ErrTagInvalidFormat{name} } return nil diff --git a/vendor/github.com/docker/docker/graph/tags/tags_unit_test.go b/vendor/github.com/docker/docker/graph/tags/tags_unit_test.go index 5114da10..374e0f05 100644 --- a/vendor/github.com/docker/docker/graph/tags/tags_unit_test.go +++ b/vendor/github.com/docker/docker/graph/tags/tags_unit_test.go @@ -14,10 +14,10 @@ func TestValidTagName(t *testing.T) { } func TestInvalidTagName(t *testing.T) { - validTags := []string{"-9", ".foo", "-test", ".", "-"} - for _, tag := range validTags { + inValidTags := []string{"-9", ".foo", "-test", ".", "-"} + for _, tag := range inValidTags { if err := ValidateTagName(tag); err == nil { - t.Errorf("'%s' shouldn't have been a valid tag", tag) + t.Errorf("'%s' should've been an invalid tag", tag) } } } diff --git a/vendor/github.com/docker/docker/graph/tags_unit_test.go b/vendor/github.com/docker/docker/graph/tags_unit_test.go index 5effb5c6..0406f154 100644 --- a/vendor/github.com/docker/docker/graph/tags_unit_test.go +++ b/vendor/github.com/docker/docker/graph/tags_unit_test.go @@ -11,8 +11,8 @@ import ( "github.com/docker/docker/daemon/events" "github.com/docker/docker/daemon/graphdriver" _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests + "github.com/docker/docker/graph/tags" "github.com/docker/docker/image" - "github.com/docker/docker/trust" "github.com/docker/docker/utils" ) @@ -53,16 +53,11 @@ func fakeTar() (io.Reader, error) { } func mkTestTagStore(root string, t *testing.T) *TagStore { - driver, err := graphdriver.New(root, nil) + driver, err := graphdriver.New(root, nil, nil, nil) if err != nil { t.Fatal(err) } - graph, err := NewGraph(root, driver) - if err != nil { - t.Fatal(err) - } - - trust, err := trust.NewTrustStore(root + "/trust") + graph, err := NewGraph(root, driver, nil, nil) if err != nil { t.Fatal(err) } @@ -70,7 +65,6 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { tagCfg := &TagStoreConfig{ Graph: graph, Events: events.New(), - Trust: trust, } store, err := NewTagStore(path.Join(root, "tags"), tagCfg) if err != nil { @@ -81,7 +75,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { t.Fatal(err) } img := &image.Image{ID: testOfficialImageID} - if err := graph.Register(img, officialArchive); err != nil { + if err := graph.Register(v1Descriptor{img}, officialArchive); err != nil { t.Fatal(err) } if err := store.Tag(testOfficialImageName, "", testOfficialImageID, false); err != nil { @@ -92,13 +86,13 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { t.Fatal(err) } img = &image.Image{ID: testPrivateImageID} - if err := graph.Register(img, privateArchive); err != nil { + if err := graph.Register(v1Descriptor{img}, privateArchive); err != nil { t.Fatal(err) } if err := store.Tag(testPrivateImageName, "", testPrivateImageID, false); err != nil { t.Fatal(err) } - if err := store.SetDigest(testPrivateImageName, testPrivateImageDigest, testPrivateImageID); err != nil { + if err := store.setDigest(testPrivateImageName, testPrivateImageDigest, testPrivateImageID); err != nil { t.Fatal(err) } return store @@ -119,17 +113,17 @@ func TestLookupImage(t *testing.T) { testOfficialImageName + ":" + testOfficialImageID, testOfficialImageName + ":" + 
testOfficialImageIDShort, testOfficialImageName, - testOfficialImageName + ":" + DefaultTag, + testOfficialImageName + ":" + tags.DefaultTag, "docker.io/" + testOfficialImageName, - "docker.io/" + testOfficialImageName + ":" + DefaultTag, + "docker.io/" + testOfficialImageName + ":" + tags.DefaultTag, "index.docker.io/" + testOfficialImageName, - "index.docker.io/" + testOfficialImageName + ":" + DefaultTag, + "index.docker.io/" + testOfficialImageName + ":" + tags.DefaultTag, "library/" + testOfficialImageName, - "library/" + testOfficialImageName + ":" + DefaultTag, + "library/" + testOfficialImageName + ":" + tags.DefaultTag, "docker.io/library/" + testOfficialImageName, - "docker.io/library/" + testOfficialImageName + ":" + DefaultTag, + "docker.io/library/" + testOfficialImageName + ":" + tags.DefaultTag, "index.docker.io/library/" + testOfficialImageName, - "index.docker.io/library/" + testOfficialImageName + ":" + DefaultTag, + "index.docker.io/library/" + testOfficialImageName + ":" + tags.DefaultTag, } privateLookups := []string{ @@ -138,7 +132,7 @@ func TestLookupImage(t *testing.T) { testPrivateImageName + ":" + testPrivateImageID, testPrivateImageName + ":" + testPrivateImageIDShort, testPrivateImageName, - testPrivateImageName + ":" + DefaultTag, + testPrivateImageName + ":" + tags.DefaultTag, } invalidLookups := []string{ diff --git a/vendor/github.com/docker/docker/image/fixtures/post1.9/expected_computed_id b/vendor/github.com/docker/docker/image/fixtures/post1.9/expected_computed_id new file mode 100644 index 00000000..cba6d81f --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/post1.9/expected_computed_id @@ -0,0 +1 @@ +sha256:f2722a8ec6926e02fa9f2674072cbc2a25cf0f449f27350f613cd843b02c9105 diff --git a/vendor/github.com/docker/docker/image/fixtures/post1.9/expected_config b/vendor/github.com/docker/docker/image/fixtures/post1.9/expected_config new file mode 100644 index 00000000..ae27bdd4 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/post1.9/expected_config @@ -0,0 +1 @@ +{"architecture":"amd64","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-09-08T21:30:30.807853054Z","docker_version":"1.9.0-dev","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"} diff --git a/vendor/github.com/docker/docker/image/fixtures/post1.9/layer_id b/vendor/github.com/docker/docker/image/fixtures/post1.9/layer_id new file mode 100644 index 00000000..ded2db28 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/post1.9/layer_id @@ 
-0,0 +1 @@ +sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a diff --git a/vendor/github.com/docker/docker/image/fixtures/post1.9/parent_id b/vendor/github.com/docker/docker/image/fixtures/post1.9/parent_id new file mode 100644 index 00000000..7d524f80 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/post1.9/parent_id @@ -0,0 +1 @@ +sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02 diff --git a/vendor/github.com/docker/docker/image/fixtures/post1.9/v1compatibility b/vendor/github.com/docker/docker/image/fixtures/post1.9/v1compatibility new file mode 100644 index 00000000..d6697c2b --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/post1.9/v1compatibility @@ -0,0 +1 @@ +{"id":"8dfb96b5d09e6cf6f376d81f1e2770ee5ede309f9bd9e079688c9782649ab326","parent":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","created":"2015-09-08T21:30:30.807853054Z","container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"docker_version":"1.9.0-dev","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux"} diff --git a/vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_computed_id b/vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_computed_id new file mode 100644 index 00000000..98a34f8c --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_computed_id @@ -0,0 +1 @@ +sha256:731d62ca192955d38edd4333c89aad021002b1e570daa6bb9f8f06b500c76a4d diff --git a/vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_config b/vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_config new file mode 100644 index 00000000..e29c9d34 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/pre1.9/expected_config @@ -0,0 +1 @@ +{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":null,"Entrypoint":["/go/bin/dnsdock"],"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Hostname":"03797203757d","Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"/go","Domainname":"","User":""},"container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT 
[\"/go/bin/dnsdock\"]"],"Entrypoint":["/go/bin/dnsdock"],"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Hostname":"03797203757d","Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"/go","Domainname":"","User":""},"created":"2015-08-19T16:49:11.368300679Z","docker_version":"1.6.2","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"} diff --git a/vendor/github.com/docker/docker/image/fixtures/pre1.9/layer_id b/vendor/github.com/docker/docker/image/fixtures/pre1.9/layer_id new file mode 100644 index 00000000..ded2db28 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/pre1.9/layer_id @@ -0,0 +1 @@ +sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a diff --git a/vendor/github.com/docker/docker/image/fixtures/pre1.9/parent_id b/vendor/github.com/docker/docker/image/fixtures/pre1.9/parent_id new file mode 100644 index 00000000..7d524f80 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/pre1.9/parent_id @@ -0,0 +1 @@ +sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02 diff --git a/vendor/github.com/docker/docker/image/fixtures/pre1.9/v1compatibility b/vendor/github.com/docker/docker/image/fixtures/pre1.9/v1compatibility new file mode 100644 index 00000000..af96e825 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fixtures/pre1.9/v1compatibility @@ -0,0 +1 @@ +{"id":"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9","parent":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","created":"2015-08-19T16:49:11.368300679Z","container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT [\"/go/bin/dnsdock\"]"],"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"docker_version":"1.6.2","config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":null,"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"architecture":"amd64","os":"linux","Size":0} diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go index a405b538..89799160 100644 --- a/vendor/github.com/docker/docker/image/image.go +++ 
b/vendor/github.com/docker/docker/image/image.go @@ -6,15 +6,34 @@ import ( "regexp" "time" + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/version" "github.com/docker/docker/runconfig" ) var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = version.Version("1.8.3") + +// Descriptor provides the information necessary to register an image in +// the graph. +type Descriptor interface { + ID() string + Parent() string + MarshalConfig() ([]byte, error) +} + // Image stores the image configuration. +// All fields in this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. type Image struct { // ID a unique 64 character identifier of the image - ID string `json:"id"` + ID string `json:"id,omitempty"` // Parent id of the image Parent string `json:"parent,omitempty"` // Comment user added comment @@ -36,7 +55,11 @@ type Image struct { // OS is the operating system used to build and run the image OS string `json:"os,omitempty"` // Size is the total size of the image including all layers it is composed of - Size int64 + Size int64 `json:",omitempty"` // capitalized for backwards compatibility + // ParentID specifies the strong, content address of the parent configuration. + ParentID digest.Digest `json:"parent_id,omitempty"` + // LayerID provides the content address of the associated layer. + LayerID digest.Digest `json:"layer_id,omitempty"` } // NewImgJSON creates an Image configuration from json. @@ -53,7 +76,74 @@ func NewImgJSON(src []byte) (*Image, error) { // ValidateID checks whether an ID string is a valid image ID. func ValidateID(id string) error { if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID '%s' is invalid", id) + return derr.ErrorCodeInvalidImageID.WithArgs(id) } return nil } + +// MakeImageConfig returns immutable configuration JSON for an image based on the +// v1Compatibility object, layer digest and parent StrongID. SHA256() of this +// config is the new image ID (strongID). +func MakeImageConfig(v1Compatibility []byte, layerID, parentID digest.Digest) ([]byte, error) { + + // Detect images created after 1.8.3 + img, err := NewImgJSON(v1Compatibility) + if err != nil { + return nil, err + } + useFallback := version.Version(img.DockerVersion).LessThan(noFallbackMinVersion) + + if useFallback { + // Fallback for pre-1.8.3. Calculate base config based on Image struct + // so that fields with default values added by Docker will use the same ID + logrus.Debugf("Using fallback hash for %v", layerID) + + v1Compatibility, err = json.Marshal(img) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(v1Compatibility, &c); err != nil { + return nil, err + } + + if err := layerID.Validate(); err != nil { + return nil, fmt.Errorf("invalid layerID: %v", err) + } + + c["layer_id"] = rawJSON(layerID) + + if parentID != "" { + if err := parentID.Validate(); err != nil { + return nil, fmt.Errorf("invalid parentID %v", err) + } + c["parent_id"] = rawJSON(parentID) + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + + return json.Marshal(c) +} + +// StrongID returns the image ID for the config JSON.
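The StrongID helper declared just below is a SHA-256 over the canonical config JSON. A stdlib-only sketch of the same idea, assuming only that digest.Canonical means SHA-256 (the vendored code goes through distribution's digest package instead):

package main

import (
	"crypto/sha256"
	"fmt"
)

// strongID hashes canonical config JSON into a content address, the same
// idea as StrongID above.
func strongID(configJSON []byte) string {
	return fmt.Sprintf("sha256:%x", sha256.Sum256(configJSON))
}

func main() {
	// Any byte-level change to the config (field order, whitespace, values)
	// produces a different ID, which is why MakeImageConfig must emit
	// stable, canonical JSON before hashing.
	fmt.Println(strongID([]byte(`{"architecture":"amd64","os":"linux"}`)))
}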
+func StrongID(configJSON []byte) (digest.Digest, error) { + digester := digest.Canonical.New() + if _, err := digester.Hash().Write(configJSON); err != nil { + return "", err + } + dgst := digester.Digest() + logrus.Debugf("H(%v) = %v", string(configJSON), dgst) + return dgst, nil +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/docker/image/image_test.go b/vendor/github.com/docker/docker/image/image_test.go new file mode 100644 index 00000000..77d92c44 --- /dev/null +++ b/vendor/github.com/docker/docker/image/image_test.go @@ -0,0 +1,55 @@ +package image + +import ( + "bytes" + "io/ioutil" + "testing" + + "github.com/docker/distribution/digest" +) + +var fixtures = []string{ + "fixtures/pre1.9", + "fixtures/post1.9", +} + +func loadFixtureFile(t *testing.T, path string) []byte { + fileData, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("error opening %s: %v", path, err) + } + + return bytes.TrimSpace(fileData) +} + +// TestMakeImageConfig makes sure that MakeImageConfig returns the expected +// canonical JSON for a reference Image. +func TestMakeImageConfig(t *testing.T) { + for _, fixture := range fixtures { + v1Compatibility := loadFixtureFile(t, fixture+"/v1compatibility") + expectedConfig := loadFixtureFile(t, fixture+"/expected_config") + layerID := digest.Digest(loadFixtureFile(t, fixture+"/layer_id")) + parentID := digest.Digest(loadFixtureFile(t, fixture+"/parent_id")) + + json, err := MakeImageConfig(v1Compatibility, layerID, parentID) + if err != nil { + t.Fatalf("MakeImageConfig on %s returned error: %v", fixture, err) + } + if !bytes.Equal(json, expectedConfig) { + t.Fatalf("did not get expected JSON for %s\nexpected: %s\ngot: %s", fixture, expectedConfig, json) + } + } +} + +// TestGetStrongID makes sure that StrongID returns the expected +// hash for a reference Image. +func TestGetStrongID(t *testing.T) { + for _, fixture := range fixtures { + expectedConfig := loadFixtureFile(t, fixture+"/expected_config") + expectedComputedID := digest.Digest(loadFixtureFile(t, fixture+"/expected_computed_id")) + + if id, err := StrongID(expectedConfig); err != nil || id != expectedComputedID { + t.Fatalf("did not get expected ID for %s\nexpected: %s\ngot: %s\nerror: %v", fixture, expectedComputedID, id, err) + } + } +} diff --git a/vendor/github.com/docker/docker/opts/envfile.go b/vendor/github.com/docker/docker/opts/envfile.go index b854227e..ba8b4f20 100644 --- a/vendor/github.com/docker/docker/opts/envfile.go +++ b/vendor/github.com/docker/docker/opts/envfile.go @@ -4,18 +4,22 @@ import ( "bufio" "fmt" "os" - "regexp" "strings" ) -var ( - // EnvironmentVariableRegexp A regexp to validate correct environment variables - // Environment variables set by the user must have a name consisting solely of - // alphabetics, numerics, and underscores - the first of which must not be numeric. - EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") -) - -// ParseEnvFile Read in a line delimited file with environment variables enumerated +// ParseEnvFile reads a file with environment variables enumerated by lines +// +// ``Environment variable names used by the utilities in the Shell and +// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase +// letters, digits, and the '_' (underscore) from the characters defined in +// Portable Character Set and do not begin with a digit.
*But*, other +// characters may be permitted by an implementation; applications shall +// tolerate the presence of such names.'' +// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html +// +// As of #16585, it's up to the application inside docker to validate +// environment variables or not; that's why we just strip leading whitespace and +// nothing more. func ParseEnvFile(filename string) ([]string, error) { fh, err := os.Open(filename) if err != nil { @@ -26,17 +30,18 @@ func ParseEnvFile(filename string) ([]string, error) { lines := []string{} scanner := bufio.NewScanner(fh) for scanner.Scan() { - line := scanner.Text() + // trim the line from all leading whitespace first + line := strings.TrimLeft(scanner.Text(), whiteSpaces) // line is not empty, and not starting with '#' if len(line) > 0 && !strings.HasPrefix(line, "#") { data := strings.SplitN(line, "=", 2) // trim the front of a variable, but nothing else variable := strings.TrimLeft(data[0], whiteSpaces) - - if !EnvironmentVariableRegexp.MatchString(variable) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)} + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} } + if len(data) > 1 { // pass the value through, no trimming diff --git a/vendor/github.com/docker/docker/opts/envfile_test.go b/vendor/github.com/docker/docker/opts/envfile_test.go index cd0ca8f3..a172267b 100644 --- a/vendor/github.com/docker/docker/opts/envfile_test.go +++ b/vendor/github.com/docker/docker/opts/envfile_test.go @@ -28,8 +28,15 @@ func TestParseEnvFileGoodFile(t *testing.T) { # comment _foobar=foobaz +with.dots=working +and_underscore=working too ` - + // Adding a newline + a line with pure whitespace. + // This is being done like this instead of the block above + // because it's common for editors to trim trailing whitespace + // from lines, which becomes annoying since that's the + // exact thing we need to test.
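A self-contained sketch of the relaxed parsing rules just described -- leading whitespace stripped, blanks and '#' comments skipped, and only names still containing whitespace rejected. This is a simplified reimplementation for illustration, not the vendored function:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

const whiteSpaces = " \t"

// parseEnv applies the same rules as the patched ParseEnvFile: strip
// leading whitespace, skip blanks and '#' comments, reject only names
// that still contain whitespace, and pass values through untrimmed.
func parseEnv(content string) ([]string, error) {
	var lines []string
	scanner := bufio.NewScanner(strings.NewReader(content))
	for scanner.Scan() {
		line := strings.TrimLeft(scanner.Text(), whiteSpaces)
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		data := strings.SplitN(line, "=", 2)
		variable := strings.TrimLeft(data[0], whiteSpaces)
		if strings.ContainsAny(variable, whiteSpaces) {
			return nil, fmt.Errorf("variable '%s' has white spaces", variable)
		}
		lines = append(lines, line)
	}
	return lines, scanner.Err()
}

func main() {
	out, err := parseEnv("foo=bar\n# comment\nwith.dots=ok\n \t \n")
	fmt.Println(out, err) // [foo=bar with.dots=ok] <nil>
}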
+ content += "\n \t " tmpFile := tmpFileWithContent(content, t) defer os.Remove(tmpFile) @@ -42,6 +49,8 @@ _foobar=foobaz "foo=bar", "baz=quux", "_foobar=foobaz", + "with.dots=working", + "and_underscore=working too", } if !reflect.DeepEqual(lines, expectedLines) { @@ -91,7 +100,7 @@ func TestParseEnvFileBadlyFormattedFile(t *testing.T) { if _, ok := err.(ErrBadEnvVariable); !ok { t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) } - expectedMessage := "poorly formatted environment: variable 'f ' is not a valid environment variable" + expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" if err.Error() != expectedMessage { t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) } @@ -126,7 +135,7 @@ another invalid line` if _, ok := err.(ErrBadEnvVariable); !ok { t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) } - expectedMessage := "poorly formatted environment: variable 'first line' is not a valid environment variable" + expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" if err.Error() != expectedMessage { t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) } diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go index a29335e6..611407a9 100644 --- a/vendor/github.com/docker/docker/opts/hosts_unix.go +++ b/vendor/github.com/docker/docker/opts/hosts_unix.go @@ -4,4 +4,5 @@ package opts import "fmt" +// DefaultHost constant defines the default host string used by docker on other hosts than Windows var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go index 55eac2aa..ec52e9a7 100644 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ b/vendor/github.com/docker/docker/opts/hosts_windows.go @@ -2,6 +2,5 @@ package opts -import "fmt" - -var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = DefaultTCPHost diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go index b1f95875..d787b56c 100644 --- a/vendor/github.com/docker/docker/opts/ip.go +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -5,20 +5,25 @@ import ( "net" ) -// IpOpt type that hold an IP -type IpOpt struct { +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { *net.IP } -func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt { - o := &IpOpt{ +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ IP: ref, } o.Set(defaultVal) return o } -func (o *IpOpt) Set(val string) error { +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parsable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { return fmt.Errorf("%s is not an ip address", val) @@ -27,7 +32,9 @@ func (o *IpOpt) Set(val string) error { return nil } -func (o *IpOpt) String() string { +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. 
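A usage sketch for the renamed IPOpt, showing the fall-back-to-reference behavior documented above. It assumes the vendored import path github.com/docker/docker/opts resolves in this tree:

package main

import (
	"fmt"
	"net"

	"github.com/docker/docker/opts" // path as vendored in this tree
)

func main() {
	var ip net.IP

	// A valid default is parsed and stored.
	o := opts.NewIPOpt(&ip, "192.168.1.1")
	fmt.Println(o.String()) // 192.168.1.1

	// An invalid default leaves the referenced IP untouched, so the
	// option falls back to whatever the reference already held.
	ip = net.IPv4(127, 0, 0, 1)
	o = opts.NewIPOpt(&ip, "not an ip")
	fmt.Println(o.String()) // 127.0.0.1

	// Set reports an error for unparsable input.
	fmt.Println(o.Set("256.0.0.999")) // 256.0.0.999 is not an ip address
}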
+func (o *IPOpt) String() string { if *o.IP == nil { return "" } diff --git a/vendor/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/docker/docker/opts/ip_test.go index b6b526a5..1027d84a 100644 --- a/vendor/github.com/docker/docker/opts/ip_test.go +++ b/vendor/github.com/docker/docker/opts/ip_test.go @@ -10,7 +10,7 @@ func TestIpOptString(t *testing.T) { var ip net.IP for _, address := range addresses { - stringAddress := NewIpOpt(&ip, address).String() + stringAddress := NewIPOpt(&ip, address).String() if stringAddress != address { t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) } @@ -21,7 +21,7 @@ func TestNewIpOptInvalidDefaultVal(t *testing.T) { ip := net.IPv4(127, 0, 0, 1) defaultVal := "Not an ip" - ipOpt := NewIpOpt(&ip, defaultVal) + ipOpt := NewIPOpt(&ip, defaultVal) expected := "127.0.0.1" if ipOpt.String() != expected { @@ -33,7 +33,7 @@ func TestNewIpOptValidDefaultVal(t *testing.T) { ip := net.IPv4(127, 0, 0, 1) defaultVal := "192.168.1.1" - ipOpt := NewIpOpt(&ip, defaultVal) + ipOpt := NewIPOpt(&ip, defaultVal) expected := "192.168.1.1" if ipOpt.String() != expected { @@ -43,11 +43,11 @@ func TestNewIpOptValidDefaultVal(t *testing.T) { func TestIpOptSetInvalidVal(t *testing.T) { ip := net.IPv4(127, 0, 0, 1) - ipOpt := &IpOpt{IP: &ip} + ipOpt := &IPOpt{IP: &ip} - invalidIp := "invalid ip" + invalidIP := "invalid ip" expectedError := "invalid ip is not an ip address" - err := ipOpt.Set(invalidIp) + err := ipOpt.Set(invalidIP) if err == nil || err.Error() != expectedError { t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) } diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go index 115ed578..a61b82cd 100644 --- a/vendor/github.com/docker/docker/opts/opts.go +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -9,36 +9,42 @@ import ( "strings" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/volume" ) var ( alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080 - DefaultHTTPHost = "127.0.0.1" - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp:// + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter // is not supplied. A better longer term solution would be to use a named // pipe as the default on the Windows daemon. + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port // DefaultUnixSocket Path for the unix socket. 
// Docker daemon by default always listens on the default unix socket DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) ) -// ListOpts type that hold a list of values and a validation function. +// ListOpts holds a list of values and a validation function. type ListOpts struct { values *[]string validator ValidatorFctType } -// NewListOpts Create a new ListOpts with the specified validator. +// NewListOpts creates a new ListOpts with the specified validator. func NewListOpts(validator ValidatorFctType) ListOpts { var values []string return *NewListOptsRef(&values, validator) } +// NewListOptsRef creates a new ListOpts with the specified values and validator. func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { return &ListOpts{ values: values, @@ -64,7 +70,7 @@ func (opts *ListOpts) Set(value string) error { return nil } -// Delete remove the given element from the slice. +// Delete removes the specified element from the slice. func (opts *ListOpts) Delete(key string) { for i, k := range *opts.values { if k == key { @@ -76,7 +82,6 @@ func (opts *ListOpts) Delete(key string) { // GetMap returns the content of values in a map in order to avoid // duplicates. -// FIXME: can we remove this? func (opts *ListOpts) GetMap() map[string]struct{} { ret := make(map[string]struct{}) for _, k := range *opts.values { @@ -85,13 +90,12 @@ func (opts *ListOpts) GetMap() map[string]struct{} { return ret } -// GetAll returns the values' slice. -// FIXME: Can we remove this? +// GetAll returns the values of slice. func (opts *ListOpts) GetAll() []string { return (*opts.values) } -// Get checks the existence of the given key. +// Get checks the existence of the specified key. func (opts *ListOpts) Get(key string) bool { for _, k := range *opts.values { if k == key { @@ -106,7 +110,7 @@ func (opts *ListOpts) Len() int { return len((*opts.values)) } -//MapOpts type that holds a map of values and a validation function. +//MapOpts holds a map of values and a validation function. type MapOpts struct { values map[string]string validator ValidatorFctType @@ -131,10 +135,16 @@ func (opts *MapOpts) Set(value string) error { return nil } +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + func (opts *MapOpts) String() string { return fmt.Sprintf("%v", map[string]string((opts.values))) } +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { if values == nil { values = make(map[string]string) @@ -145,13 +155,13 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { } } -// ValidatorFctType validator that return a validate string and/or an error +// ValidatorFctType defines a validator function that returns a validated string and/or an error. 
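A short usage sketch showing how a ValidatorFctType plugs into ListOpts, using ValidateIPAddress from this same file. The import path is assumed to resolve against this vendored tree:

package main

import (
	"fmt"

	"github.com/docker/docker/opts" // path as vendored in this tree
)

func main() {
	// NewListOpts wires a ValidatorFctType into the list; every Set call
	// is passed through the validator before the value is stored.
	dns := opts.NewListOpts(opts.ValidateIPAddress)

	if err := dns.Set("8.8.8.8"); err != nil {
		fmt.Println("unexpected:", err)
	}
	if err := dns.Set("not-an-ip"); err != nil {
		fmt.Println("rejected:", err) // not-an-ip is not an ip address
	}

	fmt.Println(dns.GetAll(), dns.Len()) // [8.8.8.8] 1
}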
type ValidatorFctType func(val string) (string, error) -// ValidatorFctListType validator that return a validate list of string and/or an error +// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error type ValidatorFctListType func(val string) ([]string, error) -// ValidateAttach Validates that the specified string is a valid attach option. +// ValidateAttach validates that the specified string is a valid attach option. func ValidateAttach(val string) (string, error) { s := strings.ToLower(val) for _, str := range []string{"stdin", "stdout", "stderr"} { @@ -162,7 +172,7 @@ func ValidateAttach(val string) (string, error) { return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") } -// ValidateLink Validates that the specified string has a valid link format (containerName:alias). +// ValidateLink validates that the specified string has a valid link format (containerName:alias). func ValidateLink(val string) (string, error) { if _, _, err := parsers.ParseLink(val); err != nil { return val, err @@ -170,53 +180,66 @@ func ValidateLink(val string) (string, error) { return val, nil } -// ValidateDevice Validate a path for devices +// ValidDeviceMode checks if the mode for device is valid or not. +// Valid mode is a composition of r (read), w (write), and m (mknod). +func ValidDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// ValidateDevice validates a path for devices // It will make sure 'val' is in the form: // [host-dir:]container-path[:mode] +// It also validates the device mode. func ValidateDevice(val string) (string, error) { - return validatePath(val, false) + return validatePath(val, ValidDeviceMode) } -// ValidatePath Validate a path for volumes -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:rw|ro] -// It will also validate the mount mode. 
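ValidDeviceMode above consumes each legal rune by flipping its map entry to false, so a repeated rune fails validation. A quick illustration, under the same vendored-import-path assumption as the earlier sketches:

package main

import (
	"fmt"

	"github.com/docker/docker/opts" // path as vendored in this tree
)

func main() {
	// Each legal rune (r, w, m) may appear at most once: the validator
	// flips the rune's map entry to false after first use, so "rr" fails.
	for _, mode := range []string{"rwm", "r", "mw", "", "rr", "x"} {
		fmt.Printf("%-5q -> %v\n", mode, opts.ValidDeviceMode(mode))
	}
	// rwm, r, mw are valid; "", rr, x are not.
}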
-func ValidatePath(val string) (string, error) { - return validatePath(val, true) -} - -func validatePath(val string, validateMountMode bool) (string, error) { +func validatePath(val string, validator func(string) bool) (string, error) { var containerPath string var mode string if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for volumes: %s", val) + return val, fmt.Errorf("bad format for path: %s", val) } - splited := strings.SplitN(val, ":", 3) - if splited[0] == "" { - return val, fmt.Errorf("bad format for volumes: %s", val) + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, fmt.Errorf("bad format for path: %s", val) } - switch len(splited) { + switch len(split) { case 1: - containerPath = splited[0] + containerPath = split[0] val = path.Clean(containerPath) case 2: - if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid { - containerPath = splited[0] - mode = splited[1] + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) } else { - containerPath = splited[1] - val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath)) + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) } case 3: - containerPath = splited[1] - mode = splited[2] - if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid { - return val, fmt.Errorf("bad mount mode specified : %s", mode) + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, fmt.Errorf("bad mode specified: %s", mode) } - val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode) + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) } if !path.IsAbs(containerPath) { @@ -225,24 +248,24 @@ func validatePath(val string, validateMountMode bool) (string, error) { return val, nil } -// ValidateEnv Validate an environment variable and returns it -// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid. +// ValidateEnv validates an environment variable and returns it. // If no value is specified, it returns the current value using os.Getenv. +// +// As in ParseEnvFile and related to #16585, environment variable names +// are not validated whatsoever; it's up to the application inside docker +// to validate them or not. func ValidateEnv(val string) (string, error) { arr := strings.Split(val, "=") if len(arr) > 1 { return val, nil } - if !EnvironmentVariableRegexp.MatchString(arr[0]) { - return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)} - } if !doesEnvExist(val) { return val, nil } return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil } -// ValidateIPAddress Validates an Ip address +// ValidateIPAddress validates an IP address. func ValidateIPAddress(val string) (string, error) { var ip = net.ParseIP(strings.TrimSpace(val)) if ip != nil { @@ -251,7 +274,7 @@ func ValidateIPAddress(val string) (string, error) { return "", fmt.Errorf("%s is not an ip address", val) } -// ValidateMACAddress Validates a MAC address +// ValidateMACAddress validates a MAC address. func ValidateMACAddress(val string) (string, error) { _, err := net.ParseMAC(strings.TrimSpace(val)) if err != nil { @@ -260,8 +283,8 @@ func ValidateMACAddress(val string) (string, error) { return val, nil } -// ValidateDNSSearch Validates domain for resolvconf search configuration.
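With the name check removed, ValidateEnv now passes through names the old regexp rejected and still expands a bare name found in the process environment. A small usage sketch, with the vendored import path assumed:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/opts" // path as vendored in this tree
)

func main() {
	// Name validation is gone: these all pass through unchanged now.
	for _, v := range []string{"1asd", "asd!qwe", "env=value"} {
		got, err := opts.ValidateEnv(v)
		fmt.Println(got, err)
	}

	// A bare name that exists in the environment is expanded.
	os.Setenv("EXAMPLE_VAR", "42")
	got, _ := opts.ValidateEnv("EXAMPLE_VAR")
	fmt.Println(got) // EXAMPLE_VAR=42
}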
-// A zero length domain is represented by . +// ValidateDNSSearch validates domain for resolvconf search configuration. +// A zero length domain is represented by a dot (.). func ValidateDNSSearch(val string) (string, error) { if val = strings.Trim(val, " "); val == "." { return val, nil @@ -280,8 +303,8 @@ func validateDomain(val string) (string, error) { return "", fmt.Errorf("%s is not a valid domain", val) } -// ValidateExtraHost Validate that the given string is a valid extrahost and returns it -// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6) +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" arr := strings.SplitN(val, ":", 2) @@ -294,8 +317,8 @@ func ValidateExtraHost(val string) (string, error) { return val, nil } -// ValidateLabel Validate that the given string is a valid label, and returns it -// Labels are in the form on key=value +// ValidateLabel validates that the specified string is a valid label, and returns it. +// Labels are in the form of key=value. func ValidateLabel(val string) (string, error) { if strings.Count(val, "=") < 1 { return "", fmt.Errorf("bad attribute format: %s", val) @@ -303,9 +326,20 @@ func ValidateLabel(val string) (string, error) { return val, nil } -// ValidateHost Validate that the given string is a valid host and returns it +// ValidateHost validates that the specified string is a valid host and returns it. func ValidateHost(val string) (string, error) { - host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val) + _, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) + if err != nil { + return val, err + } + // Note: unlike most flag validators, we don't return the mutated value here; + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost parses and sets defaults for a daemon host string +func ParseHost(defaultHost, val string) (string, error) { + host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) if err != nil { return val, err } diff --git a/vendor/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/docker/docker/opts/opts_test.go index f08df30b..e02d3f8e 100644 --- a/vendor/github.com/docker/docker/opts/opts_test.go +++ b/vendor/github.com/docker/docker/opts/opts_test.go @@ -3,6 +3,7 @@ package opts import ( "fmt" "os" + "runtime" "strings" "testing" ) @@ -273,58 +274,6 @@ func TestValidateLink(t *testing.T) { } } -func TestValidatePath(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:ro", - "/hostPath:/containerPath:rw", - "/rw:/ro", - "/path:rw", - "/path:ro", - "/rw:rw", - } - invalid := map[string]string{ - "": "bad format for volumes: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for volumes: :", - "/tmp:": " is not an absolute path", - ":test": "bad format for volumes: :test", - ":/test": "bad format for volumes: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad
format for volumes: :test:", - "::": "bad format for volumes: ::", - ":::": "bad format for volumes: :::", - "/tmp:::": "bad format for volumes: /tmp:::", - ":/tmp::": "bad format for volumes: :/tmp::", - "path:ro": "path is not an absolute path", - "/path:/path:sw": "bad mount mode specified : sw", - "/path:/path:rwz": "bad mount mode specified : rwz", - } - - for _, path := range valid { - if _, err := ValidatePath(path); err != nil { - t.Fatalf("ValidatePath(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidatePath(path); err == nil { - t.Fatalf("ValidatePath(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidatePath(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} func TestValidateDevice(t *testing.T) { valid := []string{ "/home", @@ -333,27 +282,30 @@ func TestValidateDevice(t *testing.T) { "/with space", "/home:/with space", "relative:/absolute-path", - "hostPath:/containerPath:ro", + "hostPath:/containerPath:r", "/hostPath:/containerPath:rw", "/hostPath:/containerPath:mrw", } invalid := map[string]string{ - "": "bad format for volumes: ", + "": "bad format for path: ", "./": "./ is not an absolute path", "../": "../ is not an absolute path", "/:../": "../ is not an absolute path", "/:path": "path is not an absolute path", - ":": "bad format for volumes: :", + ":": "bad format for path: :", "/tmp:": " is not an absolute path", - ":test": "bad format for volumes: :test", - ":/test": "bad format for volumes: :/test", + ":test": "bad format for path: :test", + ":/test": "bad format for path: :/test", "tmp:": " is not an absolute path", - ":test:": "bad format for volumes: :test:", - "::": "bad format for volumes: ::", - ":::": "bad format for volumes: :::", - "/tmp:::": "bad format for volumes: /tmp:::", - ":/tmp::": "bad format for volumes: :/tmp::", + ":test:": "bad format for path: :test:", + "::": "bad format for path: ::", + ":::": "bad format for path: :::", + "/tmp:::": "bad format for path: /tmp:::", + ":/tmp::": "bad format for path: :/tmp::", "path:ro": "ro is not an absolute path", + "path:rr": "rr is not an absolute path", + "a:/b:ro": "bad mode specified: ro", + "a:/b:rr": "bad mode specified: rr", } for _, path := range valid { @@ -374,35 +326,23 @@ func TestValidateDevice(t *testing.T) { } func TestValidateEnv(t *testing.T) { - invalids := map[string]string{ - "some spaces": "poorly formatted environment: variable 'some spaces' is not a valid environment variable", - "asd!qwe": "poorly formatted environment: variable 'asd!qwe' is not a valid environment variable", - "1asd": "poorly formatted environment: variable '1asd' is not a valid environment variable", - "123": "poorly formatted environment: variable '123' is not a valid environment variable", - } valids := map[string]string{ - "a": "a", - "something": "something", - "_=a": "_=a", - "env1=value1": "env1=value1", - "_env1=value1": "_env1=value1", - "env2=value2=value3": "env2=value2=value3", - "env3=abc!qwe": "env3=abc!qwe", - "env_4=value 4": "env_4=value 4", - "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - "PATH=something": "PATH=something", - } - for value, expectedError := range invalids { - _, err := ValidateEnv(value) - if err == nil { - t.Fatalf("Expected ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected ErrBadEnvVariable, got [%s]", err) - } - if err.Error() != expectedError { - 
t.Fatalf("Expected ErrBadEnvVariable with message [%s], got [%s]", expectedError, err.Error()) - } + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + "asd!qwe": "asd!qwe", + "1asd": "1asd", + "123": "123", + "some space": "some space", + " some space before": " some space before", + "some space after ": "some space after ", } for value, expected := range valids { actual, err := ValidateEnv(value) @@ -432,22 +372,30 @@ func TestValidateLabel(t *testing.T) { } } -func TestValidateHost(t *testing.T) { +func TestParseHost(t *testing.T) { invalid := map[string]string{ "anything": "Invalid bind address format: anything", "something with spaces": "Invalid bind address format: something with spaces", "://": "Invalid bind address format: ://", "unknown://": "Invalid bind address format: unknown://", - "tcp://": "Invalid proto, expected tcp: ", "tcp://:port": "Invalid bind address format: :port", "tcp://invalid": "Invalid bind address format: invalid", "tcp://invalid:port": "Invalid bind address format: invalid:port", } + const defaultHTTPHost = "tcp://127.0.0.1:2375" + var defaultHOST = "unix:///var/run/docker.sock" + + if runtime.GOOS == "windows" { + defaultHOST = defaultHTTPHost + } valid := map[string]string{ + "": defaultHOST, "fd://": "fd://", "fd://something": "fd://something", - "tcp://:2375": "tcp://127.0.0.1:2375", // default ip address - "tcp://:2376": "tcp://127.0.0.1:2376", // default ip address + "tcp://host:": "tcp://host:2375", + "tcp://": "tcp://localhost:2375", + "tcp://:2375": "tcp://localhost:2375", // default ip address + "tcp://:2376": "tcp://localhost:2376", // default ip address "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", "tcp://192.168:8080": "tcp://192.168:8080", @@ -458,12 +406,12 @@ func TestValidateHost(t *testing.T) { } for value, errorMessage := range invalid { - if _, err := ValidateHost(value); err == nil || err.Error() != errorMessage { + if _, err := ParseHost(defaultHTTPHost, value); err == nil || err.Error() != errorMessage { t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) } } for value, expected := range valid { - if actual, err := ValidateHost(value); err != nil || actual != expected { + if actual, err := ParseHost(defaultHTTPHost, value); err != nil || actual != expected { t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) } } diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go new file mode 100644 index 00000000..f1ce844a --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go new file mode 100644 index 00000000..b9ff2bae --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. +// @jhowardmsft, @swernli. 
+// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP4. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=toDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows TP4 build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go
index f8d34365..b41f475b 100644
--- a/vendor/github.com/docker/docker/opts/ulimit.go
+++ b/vendor/github.com/docker/docker/opts/ulimit.go
@@ -6,10 +6,12 @@ import (
"github.com/docker/docker/pkg/ulimit"
)
+// UlimitOpt defines a map of Ulimits
type UlimitOpt struct {
values *map[string]*ulimit.Ulimit
}
+// NewUlimitOpt creates a new UlimitOpt
func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
if ref == nil {
ref = &map[string]*ulimit.Ulimit{}
@@ -17,6 +19,7 @@ func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
return &UlimitOpt{ref}
}
+// Set validates a Ulimit and sets its name as a key in UlimitOpt
func (o *UlimitOpt) Set(val string) error {
l, err := ulimit.Parse(val)
if err != nil {
@@ -28,6 +31,7 @@ func (o *UlimitOpt) Set(val string) error {
return nil
}
+// String returns Ulimit values as a string.
func (o *UlimitOpt) String() string {
var out []string
for _, v := range *o.values {
@@ -37,6 +41,7 @@ func (o *UlimitOpt) String() string {
return fmt.Sprintf("%v", out)
}
+// GetList returns a slice of pointers to Ulimits.
func (o *UlimitOpt) GetList() []*ulimit.Ulimit {
var ulimits []*ulimit.Ulimit
for _, v := range *o.values {
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
index 11a707d2..e7c82e10 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -19,35 +19,50 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/system"
)
type (
- Archive io.ReadCloser
- ArchiveReader io.Reader
- Compression int
+ // Archive is a type of io.ReadCloser which has the two methods Read and Close.
+ Archive io.ReadCloser
+ // Reader is a type of io.Reader.
+ Reader io.Reader
+ // Compression is the state representing whether a stream is compressed or not.
+ Compression int
+ // TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct {
UID, GID int
}
+ // TarOptions wraps the tar options.
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
ChownOpts *TarChownOptions
- Name string
IncludeSourceDir bool
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
}
// Archiver allows the reuse of most utility functions of this package
- // with a pluggable Untar function.
Also, to facilitate the passing of
+ // specific id mappings for untar, an archiver can be created with maps
+ // which will then be passed to Untar operations.
Archiver struct {
- Untar func(io.Reader, string, *TarOptions) error
+ Untar func(io.Reader, string, *TarOptions) error
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
}
// breakoutError is used to differentiate errors related to breaking out
@@ -57,17 +72,23 @@ type (
)
var (
+ // ErrNotImplemented is the error returned when a function is not implemented.
ErrNotImplemented = errors.New("Function not implemented")
- defaultArchiver = &Archiver{Untar}
+ defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)
const (
+ // Uncompressed represents an uncompressed archive.
Uncompressed Compression = iota
+ // Bzip2 is the bzip2 compression algorithm.
Bzip2
+ // Gzip is the gzip compression algorithm.
Gzip
+ // Xz is the xz compression algorithm.
Xz
)
+// IsArchive checks whether the given header bytes describe an archive.
func IsArchive(header []byte) bool {
compression := DetectCompression(header)
if compression != Uncompressed {
@@ -78,6 +99,7 @@ func IsArchive(header []byte) bool {
return err == nil
}
+// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
@@ -95,12 +117,13 @@ func DetectCompression(source []byte) Compression {
return Uncompressed
}
-func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
args := []string{"xz", "-d", "-c", "-q"}
- return CmdStream(exec.Command(args[0], args[1:]...), archive)
+ return cmdStream(exec.Command(args[0], args[1:]...), archive)
}
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
@@ -126,17 +149,21 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
return readBufWrapper, nil
case Xz:
- xzReader, err := xzDecompress(buf)
+ xzReader, chdone, err := xzDecompress(buf)
if err != nil {
return nil, err
}
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
- return readBufWrapper, nil
+ return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+ <-chdone
+ return readBufWrapper.Close()
+ }), nil
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
+// CompressStream compresses dest with the specified compression algorithm.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
@@ -157,6 +184,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose
}
}
+// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string { switch *compression { case Uncompressed: @@ -177,6 +205,8 @@ type tarAppender struct { // for hardlink mapping SeenFiles map[uint64]string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap } // canonicalTarName provides a platform-independent and consistent posix-style @@ -219,14 +249,14 @@ func (ta *tarAppender) addTarFile(path, name string) error { } hdr.Name = name - nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) if err != nil { return err } - // if it's a regular file and has more than 1 link, + // if it's not a directory and has more than 1 link, // it's hardlinked, so set the type flag accordingly - if fi.Mode().IsRegular() && nlink > 1 { + if !fi.IsDir() && hasHardlinks(fi) { // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { @@ -244,6 +274,25 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Xattrs["security.capability"] = string(capability) } + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files + if ta.UIDMaps != nil || ta.GIDMaps != nil { + uid, gid, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if err != nil { + return err + } + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + hdr.Gid = xGID + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } @@ -358,19 +407,19 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return err } - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - // syscall.UtimesNano doesn't support a NOFOLLOW flag atm + // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return err } } else { + ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } @@ -388,6 +437,10 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. 
+ srcPath = fixVolumePathPrefix(srcPath) + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) if err != nil { @@ -406,6 +459,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) TarWriter: tar.NewWriter(compressWriter), Buffer: pools.BufioWriter32KPool.Get(nil), SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, } defer func() { @@ -454,11 +509,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) seen := make(map[string]bool) - var renamedRelFilePath string // For when tar.Options.Name is set for _, include := range options.IncludeFiles { - // We can't use filepath.Join(srcPath, include) because this will - // clean away a trailing "." or "/" which may be important. - walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator)) + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) @@ -503,14 +557,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } seen[relFilePath] = true - // TODO Windows: Verify if this needs to be os.Pathseparator - // Rename the base resource - if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { - renamedRelFilePath = relFilePath - } - // Set this to make sure the items underneath also get renamed - if options.Name != "" { - relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { @@ -524,12 +581,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return pipeReader, nil } +// Unpack unpacks the decompressedArchive to dest with options. func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } // Iterate through the files in the archive. loop: @@ -607,6 +669,28 @@ loop: } trBuf.Reset(tr) + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. 
+ if hdr.Uid != remappedRootUID {
+ xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ }
+ if hdr.Gid != remappedRootGID {
+ xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = xGID
+ }
+
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err
}
@@ -620,8 +704,8 @@ loop:
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
- ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
- if err := syscall.UtimesNano(path, ts); err != nil {
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return err
}
}
@@ -637,7 +721,7 @@ func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
}
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
@@ -657,7 +741,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
options.ExcludePatterns = []string{}
}
- var r io.Reader = tarArchive
+ r := tarArchive
if decompress {
decompressedArchive, err := DecompressStream(tarArchive)
if err != nil {
@@ -670,6 +754,8 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
return Unpack(r, dest, options)
}
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
logrus.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
@@ -677,7 +763,15 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
return err
}
defer archive.Close()
- return archiver.Untar(archive, dst, nil)
+
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
+ }
+ return archiver.Untar(archive, dst, options)
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
@@ -686,13 +780,21 @@ func TarUntar(src, dst string) error {
return defaultArchiver.TarUntar(src, dst)
}
+// UntarPath untars a file from path to a destination; src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error {
archive, err := os.Open(src)
if err != nil {
return err
}
defer archive.Close()
- if err := archiver.Untar(archive, dst, nil); err != nil {
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
+ }
+ if err := archiver.Untar(archive, dst, options); err != nil {
return err
}
return nil
@@ -704,6 +806,10 @@ func UntarPath(src, dst string) error {
return defaultArchiver.UntarPath(src, dst)
}
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { @@ -714,7 +820,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { } // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(dst, 0755); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) @@ -729,6 +835,9 @@ func CopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) @@ -746,7 +855,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } @@ -767,6 +876,28 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + + // only perform mapping if the file being copied isn't already owned by the + // uid or gid of the remapped root in the container + if remappedRootUID != hdr.Uid { + xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if remappedRootGID != hdr.Gid { + xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { @@ -782,7 +913,12 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { err = er } }() - return archiver.Untar(r, filepath.Dir(dst), nil) + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err } // CopyFileWithTar emulates the behavior of the 'cp' command-line @@ -797,57 +933,33 @@ func CopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } -// CmdStream executes a command, and returns its stdout as a stream. +// cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. 
-func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - if input != nil { - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - // Write stdin if any - go func() { - io.Copy(stdin, input) - stdin.Close() - }() - } - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input pipeR, pipeW := io.Pipe() - errChan := make(chan []byte) - // Collect stderr, we will use it in case of an error - go func() { - errText, e := ioutil.ReadAll(stderr) - if e != nil { - errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") - } - errChan <- errText - }() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + // Copy stdout to the returned pipe go func() { - _, err := io.Copy(pipeW, stdout) - if err != nil { - pipeW.CloseWithError(err) - } - errText := <-errChan if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) } else { pipeW.Close() } + close(chdone) }() - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - return pipeR, nil + + return pipeR, chdone, nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents @@ -872,6 +984,8 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { return &TempArchive{File: f, Size: size}, nil } +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go
index b93c76cd..6c54c02d 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_test.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_test.go
@@ -160,7 +160,7 @@ func TestExtensionXz(t *testing.T) {
func TestCmdStreamLargeStderr(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
- out, err := CmdStream(cmd, nil)
+ out, _, err := cmdStream(cmd, nil)
if err != nil {
t.Fatalf("Failed to start command: %s", err)
}
@@ -181,7 +181,7 @@ func TestCmdStreamLargeStderr(t *testing.T) {
func TestCmdStreamBad(t *testing.T) {
badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
- out, err := CmdStream(badCmd, nil)
+ out, _, err := cmdStream(badCmd, nil)
if err != nil {
t.Fatalf("Failed to start command: %s", err)
}
@@ -196,7 +196,7 @@ func TestCmdStreamBad(t *testing.T) {
func TestCmdStreamGood(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
- out, err := CmdStream(cmd, nil)
+ out, _, err := cmdStream(cmd, nil)
if err != nil {
t.Fatal(err)
}
@@ -695,7 +695,7 @@ func TestTarWithOptions(t *testing.T) {
{&TarOptions{ExcludePatterns: []string{"2"}}, 1},
{&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
{&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
- {&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4},
+ {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4},
}
for _, testCase := range cases {
changes, err := tarUntar(t, origin, testCase.opts)
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
index 9e1dfad2..abf9ad78 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -6,11 +6,26 @@ import (
"archive/tar"
"errors"
"os"
+ "path/filepath"
"syscall"
"github.com/docker/docker/pkg/system"
)
+// fixVolumePathPrefix does platform-specific processing to ensure that if
+// the path being passed in is not in a volume path format, it is converted to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
+
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
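A brief aside, separate from the vendored patch itself: the Unix getWalkRoot above builds the walk root by hand precisely because filepath.Join cleans its result, silently dropping a trailing "." or "/" that TarWithOptions needs to preserve. A minimal, self-contained Go sketch of that standard-library behavior:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Join cleans the joined path, so the trailing "." and "/" vanish.
	fmt.Println(filepath.Join("/data", "."))    // prints "/data"
	fmt.Println(filepath.Join("/data", "dir/")) // prints "/data/dir"

	// Manual concatenation, as in the Unix getWalkRoot, preserves them.
	fmt.Println("/data" + string(filepath.Separator) + ".")    // prints "/data/."
	fmt.Println("/data" + string(filepath.Separator) + "dir/") // prints "/data/dir/"
}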
@@ -25,7 +40,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
-func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
@@ -33,10 +48,9 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
return
}
- nlink = uint32(s.Nlink)
inode = uint64(s.Ino)
- // Currently go does not fil in the major/minors
+ // Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
@@ -46,6 +60,15 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
return
}
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return int(s.Uid), int(s.Gid), nil
+}
+
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
index 10db4bd0..b348cde6 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -6,10 +6,25 @@ import (
"archive/tar"
"fmt"
"os"
+ "path/filepath"
"strings"
+
+ "github.com/docker/docker/pkg/longpath"
)
-// canonicalTarNameForPath returns platform-specific filepath
+// fixVolumePathPrefix does platform-specific processing to ensure that if
+// the path being passed in is not in a volume path format, it is converted to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
@@ -34,7 +49,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm
}
-func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
// do nothing.
no notion of Rdev, Inode, Nlink in stat on Windows
return
}
@@ -48,3 +63,8 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ // no notion of file ownership mapping yet on Windows
+ return 0, 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go
index 72bc71e0..b7abc402 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go
@@ -3,10 +3,32 @@ package archive
import (
+ "io/ioutil"
"os"
+ "path/filepath"
"testing"
)
+func TestCopyFileWithInvalidDest(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := "c:dest"
+ srcFolder := filepath.Join(folder, "src")
+ src := filepath.Join(folder, "src", "src")
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ err = CopyWithTar(src, dest)
+ if err == nil {
+ t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.")
+ }
+}
+
func TestCanonicalTarNameForPath(t *testing.T) {
cases := []struct {
in, expected string
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
index 689d9a21..e0bd4c46 100644
--- a/vendor/github.com/docker/docker/pkg/archive/changes.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -14,34 +14,46 @@ import (
"time"
"github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
+// ChangeType represents the change type.
type ChangeType int
const (
+ // ChangeModify represents the modify operation.
ChangeModify = iota
+ // ChangeAdd represents the add operation.
ChangeAdd
+ // ChangeDelete represents the delete operation.
ChangeDelete
)
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, or delete.
+// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
- var kind string
- switch change.Kind {
- case ChangeModify:
- kind = "C"
- case ChangeAdd:
- kind = "A"
- case ChangeDelete:
- kind = "D"
- }
- return fmt.Sprintf("%s %s", kind, change.Path)
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
@@ -94,7 +106,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
}
// Skip AUFS metadata
- if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
+ if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
return err
}
@@ -105,8 +117,8 @@ func Changes(layers []string, rw string) ([]Change, error) {
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
- if strings.HasPrefix(file, ".wh.") {
- originalFile := file[len(".wh."):]
+ if strings.HasPrefix(file, WhiteoutPrefix) {
+ originalFile := file[len(WhiteoutPrefix):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
@@ -161,20 +173,22 @@ func Changes(layers []string, rw string) ([]Change, error) {
return changes, nil
}
+// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
- stat *system.Stat_t
+ stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
-func (root *FileInfo) LookUp(path string) *FileInfo {
+// LookUp looks up the FileInfo for the given path.
+func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
- parent := root
+ parent := info
if path == string(os.PathSeparator) {
- return root
+ return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
@@ -275,6 +289,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
}
+// Changes computes the changes between info and oldInfo.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
@@ -316,13 +331,29 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) {
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
- var size int64
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
- fileInfo, _ := os.Lstat(file)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
if fileInfo != nil && !fileInfo.IsDir() {
- size += fileInfo.Size()
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
}
}
}
@@ -330,13 +361,15 @@ func ChangesSize(newDir string, changes []Change) int64 {
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
-func ExportChanges(dir string, changes []Change) (Archive, error) { +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { reader, writer := io.Pipe() go func() { ta := &tarAppender{ TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, } // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) @@ -351,7 +384,7 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { if change.Kind == ChangeDelete { whiteOutDir := filepath.Dir(change.Path) whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) timestamp := time.Now() hdr := &tar.Header{ Name: whiteOut[1:], diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go index 9d528e61..5a3282b5 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go @@ -61,7 +61,7 @@ func TestHardLinkOrder(t *testing.T) { sort.Sort(changesByPath(changes)) // ExportChanges - ar, err := ExportChanges(dest, changes) + ar, err := ExportChanges(dest, changes, nil, nil) if err != nil { t.Fatal(err) } @@ -73,7 +73,7 @@ func TestHardLinkOrder(t *testing.T) { // reverse sort sort.Sort(sort.Reverse(changesByPath(changes))) // ExportChanges - arRev, err := ExportChanges(dest, changes) + arRev, err := ExportChanges(dest, changes, nil, nil) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_test.go index 509bdb2e..52daaa64 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_test.go @@ -410,7 +410,7 @@ func TestApplyLayer(t *testing.T) { t.Fatal(err) } - layer, err := ExportChanges(dst, changes) + layer, err := ExportChanges(dst, changes, nil, nil) if err != nil { t.Fatal(err) } @@ -434,6 +434,35 @@ func TestApplyLayer(t *testing.T) { } } +func TestChangesSizeWithHardlinks(t *testing.T) { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(destDir) + + creationSize, err := prepareUntarSourceDirectory(100, destDir, true) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + t.Fatal(err) + } + + got := ChangesSize(destDir, changes) + if got != int64(creationSize) { + t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) + } +} + func TestChangesSizeWithNoChanges(t *testing.T) { size := ChangesSize("/tmp", nil) if size != 0 { @@ -468,7 +497,7 @@ func TestChangesSize(t *testing.T) { } size := ChangesSize(parentPath, changes) if size != 6 { - t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + t.Fatalf("Expected 6 bytes of changes, got %d", size) } } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go index d780f163..3778b732 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ 
b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -3,16 +3,17 @@ package archive import ( + "os" "syscall" "github.com/docker/docker/pkg/system" ) -func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { // Don't look at size for dirs, its not a good measure of change if oldStat.Mode() != newStat.Mode() || - oldStat.Uid() != newStat.Uid() || - oldStat.Gid() != newStat.Gid() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && @@ -25,3 +26,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 } + +func getIno(fi os.FileInfo) uint64 { + return uint64(fi.Sys().(*syscall.Stat_t).Ino) +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go index 4809b7a5..af94243f 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -1,10 +1,12 @@ package archive import ( + "os" + "github.com/docker/docker/pkg/system" ) -func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { // Don't look at size for dirs, its not a good measure of change if oldStat.ModTime() != newStat.ModTime() || @@ -18,3 +20,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.IsDir() } + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 93c81e84..ecfd0a9c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -6,11 +6,11 @@ import ( "io" "io/ioutil" "os" - "path" "path/filepath" "strings" "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" ) // Errors used or returned by this file. @@ -29,8 +29,12 @@ var ( // path already ends in a `.` path segment, then another is not added. If the // clean path already ends in a path separator, then another is not added. func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) { - if !HasTrailingPathSeparator(cleanedPath) { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { // Add a separator if it doesn't already end with one (a cleaned // path would only end in a separator if it is the root). cleanedPath += string(filepath.Separator) @@ -38,60 +42,60 @@ func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { cleanedPath += "." 
} - if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { cleanedPath += string(filepath.Separator) } return cleanedPath } -// AssertsDirectory returns whether the given path is +// assertsDirectory returns whether the given path is // asserted to be a directory, i.e., the path ends with // a trailing '/' or `/.`, assuming a path separator of `/`. -func AssertsDirectory(path string) bool { - return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path) +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) } -// HasTrailingPathSeparator returns whether the given +// hasTrailingPathSeparator returns whether the given // path ends with the system's path separator character. -func HasTrailingPathSeparator(path string) bool { +func hasTrailingPathSeparator(path string) bool { return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) } -// SpecifiesCurrentDir returns whether the given path specifies +// specifiesCurrentDir returns whether the given path specifies // a "current directory", i.e., the last path segment is `.`. -func SpecifiesCurrentDir(path string) bool { +func specifiesCurrentDir(path string) bool { return filepath.Base(path) == "." } -// SplitPathDirEntry splits the given path between its -// parent directory and its basename in that directory. -func SplitPathDirEntry(localizedPath string) (dir, base string) { - normalizedPath := filepath.ToSlash(localizedPath) - vol := filepath.VolumeName(normalizedPath) - normalizedPath = normalizedPath[len(vol):] +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) - if normalizedPath == "/" { - // Specifies the root path. - return filepath.FromSlash(vol + normalizedPath), "." + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." } - trimmedPath := vol + strings.TrimRight(normalizedPath, "/") - - dir = filepath.FromSlash(path.Dir(trimmedPath)) - base = filepath.FromSlash(path.Base(trimmedPath)) - - return dir, base + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) } -// TarResource archives the resource at the given sourcePath into a Tar +// TarResource archives the resource described by the given CopyInfo to a Tar // archive. A non-nil error is returned if sourcePath does not exist or is // asserted to be a directory but exists as another type of file. // // This function acts as a convenient wrapper around TarWithOptions, which // requires a directory as the source path. TarResource accepts either a // directory or a file path and correctly sets the Tar options. -func TarResource(sourcePath string) (content Archive, err error) { +func TarResource(sourceInfo CopyInfo) (content Archive, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { + sourcePath = normalizePath(sourcePath) if _, err = os.Lstat(sourcePath); err != nil { // Catches the case where the source does not exist or is not a // directory if asserted to be a directory, as this also causes an @@ -99,22 +103,6 @@ func TarResource(sourcePath string) (content Archive, err error) { return } - if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) { - // In the case where the source path is a symbolic link AND it ends - // with a path separator, we will want to evaluate the symbolic link. - trimmedPath := sourcePath[:len(sourcePath)-1] - stat, err := os.Lstat(trimmedPath) - if err != nil { - return nil, err - } - - if stat.Mode()&os.ModeSymlink != 0 { - if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil { - return nil, err - } - } - } - // Separate the source path between it's directory and // the entry in that directory which we are archiving. sourceDir, sourceBase := SplitPathDirEntry(sourcePath) @@ -127,39 +115,150 @@ func TarResource(sourcePath string) (content Archive, err error) { Compression: Uncompressed, IncludeFiles: filter, IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, }) } // CopyInfo holds basic info about the source // or destination path of a copy operation. type CopyInfo struct { - Path string - Exists bool - IsDir bool + Path string + Exists bool + IsDir bool + RebaseName string } -// CopyInfoStatPath stats the given path to create a CopyInfo -// struct representing that resource. If mustExist is true, then -// it is an error if there is no file or directory at the given path. -func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) { - pathInfo := CopyInfo{Path: path} +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string) (CopyInfo, error) { + // Split the given path into its Directory and Base components. We will + // evaluate symlinks in the directory component then append the base. + path = normalizePath(path) + dirPath, basePath := filepath.Split(path) - fileInfo, err := os.Lstat(path) - - if err == nil { - pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir() - } else if os.IsNotExist(err) && !mustExist { - err = nil + resolvedDirPath, err := filepath.EvalSymlinks(dirPath) + if err != nil { + return CopyInfo{}, err } - return pathInfo, err + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath + + var rebaseName string + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
+ rebaseName = filepath.Base(path) + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil } // PrepareArchiveCopy prepares the given srcContent archive, which should // contain the archived resource described by srcInfo, to the destination // described by dstInfo. Returns the possibly modified content archive along // with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { +func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + // Separate the destination path between its directory and base // components in case the source archive contents need to be rebased. 
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) @@ -189,7 +288,7 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds // The source content entries will have to be renamed to have a // basename which matches the destination path's basename. return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case AssertsDirectory(dstInfo.Path): + case assertsDirectory(dstInfo.Path): // The destination does not exist and is asserted to be created as a // directory, but the source content is not a directory. This is an // error condition since you cannot create a directory from a file @@ -208,8 +307,15 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds } // rebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurance of oldBase with newBase at the beginning of entry names. -func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive { +// an occurrence of oldBase with newBase at the beginning of entry names. +func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + rebased, w := io.Pipe() go func() { @@ -255,15 +361,19 @@ func CopyResource(srcPath, dstPath string) error { err error ) + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + // Clean the source and destination paths. srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil { + if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil { return err } - content, err := TarResource(srcPath) + content, err := TarResource(srcInfo) if err != nil { return err } @@ -274,25 +384,14 @@ func CopyResource(srcPath, dstPath string) error { // CopyTo handles extracting the given content whose // entries should be sourced from srcInfo to dstPath. -func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error { - dstInfo, err := CopyInfoStatPath(dstPath, false) +func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. + dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) if err != nil { return err } - if !dstInfo.Exists { - // Ensure destination parent dir exists. 
- dstParent, _ := SplitPathDirEntry(dstPath) - - dstStat, err := os.Lstat(dstParent) - if err != nil { - return err - } - if !dstStat.IsDir() { - return ErrNotDirectory - } - } - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) if err != nil { return err diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_test.go b/vendor/github.com/docker/docker/pkg/archive/copy_test.go index dd0b3236..25f1811a 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_test.go @@ -138,13 +138,7 @@ func TestCopyErrSrcNotExists(t *testing.T) { tmpDirA, tmpDirB := getTestTempDirs(t) defer removeAllPaths(tmpDirA, tmpDirB) - content, err := TarResource(filepath.Join(tmpDirA, "file1")) - if err == nil { - content.Close() - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { + if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1")); !os.IsNotExist(err) { t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) } } @@ -158,13 +152,7 @@ func TestCopyErrSrcNotDir(t *testing.T) { // Load A with some sample files and directories. createSampleDir(t, tmpDirA) - content, err := TarResource(joinTrailingSep(tmpDirA, "file1")) - if err == nil { - content.Close() - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { + if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1")); !isNotDir(err) { t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) } } @@ -181,7 +169,7 @@ func TestCopyErrDstParentNotExists(t *testing.T) { srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} // Try with a file source. - content, err := TarResource(srcInfo.Path) + content, err := TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } @@ -199,7 +187,7 @@ func TestCopyErrDstParentNotExists(t *testing.T) { // Try with a directory source. srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - content, err = TarResource(srcInfo.Path) + content, err = TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } @@ -228,7 +216,7 @@ func TestCopyErrDstNotDir(t *testing.T) { // Try with a file source. srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - content, err := TarResource(srcInfo.Path) + content, err := TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } @@ -245,7 +233,7 @@ func TestCopyErrDstNotDir(t *testing.T) { // Try with a directory source. 
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - content, err = TarResource(srcInfo.Path) + content, err = TarResource(srcInfo) if err != nil { t.Fatalf("unexpected error %T: %s", err, err) } diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 00000000..e305b5e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 00000000..2b775b45 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index d310a17a..1b08ad33 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -9,23 +9,41 @@ import ( "path/filepath" "runtime" "strings" - "syscall" "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) -func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) + if options == nil { + options = &TarOptions{} + } // Iterate through the files in the archive. for { hdr, err := tr.Next() @@ -55,7 +73,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { // TODO Windows. Once the registry is aware of what images are Windows- // specific or Linux-specific, this warning should be changed to an error // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertantly. + // image but have it tagged as Windows inadvertently. if runtime.GOOS == "windows" { if strings.Contains(hdr.Name, ":") { logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) @@ -80,11 +98,11 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { } // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, ".wh..wh.") { + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { // Regular files inside /.wh..wh.plnk can be used as hardlink targets // We don't want this directory, but we need the files in them so that // such hardlinks can be resolved. 
- if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { @@ -97,7 +115,10 @@ return 0, err } } - continue + + if hdr.Name != WhiteoutOpaqueDir { + continue + } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) @@ -111,11 +132,38 @@ } base := filepath.Base(path) - if strings.HasPrefix(base, ".wh.") { - originalBase := base[len(".wh."):] - originalPath := filepath.Join(filepath.Dir(path), originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } } } else { // If path exists we almost always just want to remove and replace it. @@ -136,7 +184,7 @@ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { @@ -150,6 +198,27 @@ srcData = tmpFile } + // if the options contain uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container.
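// A minimal sketch of the translation idtools.ToHost performs in the code
// below, assuming a single illustrative subordinate range (not taken from
// the patch) that maps container root to host uid 100000:
//
//	uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
//	hostUID, err := idtools.ToHost(1000, uidMaps) // hostUID == 101000
//	// an id outside every mapped range makes ToHost return an error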
+ if srcHdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) + if err != nil { + return 0, err + } + srcHdr.Uid = xUID + } + if srcHdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) + if err != nil { + return 0, err + } + srcHdr.Gid = xGID + } if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { return 0, err } @@ -159,13 +228,13 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } + unpackedPaths[path] = struct{}{} } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := syscall.UtimesNano(path, ts); err != nil { + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err } } @@ -177,20 +246,20 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { // and applies it to the directory `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { - return applyLayerHandler(dest, layer, true) +func ApplyLayer(dest string, layer Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) } // ApplyUncompressedLayer parses a diff in the standard layer format from // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) { - return applyLayerHandler(dest, layer, false) +func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) } // do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) { +func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { dest = filepath.Clean(dest) // We need to be able to set any perms @@ -206,5 +275,5 @@ func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64 return 0, err } } - return UnpackLayer(dest, layer) + return UnpackLayer(dest, layer, options) } diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/docker/docker/pkg/archive/diff_test.go index 01ed4372..2b29992d 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff_test.go @@ -2,7 +2,14 @@ package archive import ( "archive/tar" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" "testing" + + "github.com/docker/docker/pkg/ioutils" ) func TestApplyLayerInvalidFilenames(t *testing.T) { @@ -188,3 +195,176 @@ func TestApplyLayerInvalidSymlink(t *testing.T) { } } } + +func TestApplyLayerWhiteouts(t *testing.T) { + wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") + if err != nil { + return + } + defer os.RemoveAll(wd) + + base := []string{ + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "baz", + "foo/", + "foo/.abc", + "foo/.bcd/", + "foo/.bcd/a", + "foo/cde/", + "foo/cde/def", + "foo/cde/efg", + "foo/fgh", + "foobar", + } + + type tcase struct { + change, expected []string + } + + tcases := []tcase{ + { + base, + base, + }, + { + []string{ + ".bay", + 
".wh.baz", + "foo/", + "foo/.bce", + "foo/.wh..wh..opq", + "foo/cde/", + "foo/cde/efg", + }, + []string{ + ".bay", + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.bce", + "foo/cde/", + "foo/cde/efg", + "foobar", + }, + }, + { + []string{ + ".bay", + ".wh..baz", + ".wh.foobar", + "foo/", + "foo/.abc", + "foo/.wh.cde", + "bar/", + }, + []string{ + ".bay", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.abc", + "foo/.bce", + }, + }, + { + []string{ + ".abc", + ".wh..wh..opq", + "foobar", + }, + []string{ + ".abc", + "foobar", + }, + }, + } + + for i, tc := range tcases { + l, err := makeTestLayer(tc.change) + if err != nil { + t.Fatal(err) + } + + _, err = UnpackLayer(wd, l, nil) + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + + paths, err := readDirContents(wd) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, paths) { + t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) + } + } + +} + +func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { + tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + for _, p := range paths { + if p[len(p)-1] == filepath.Separator { + if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { + return + } + } else { + if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { + return + } + } + } + archive, err := Tar(tmpDir, Uncompressed) + if err != nil { + return + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + os.RemoveAll(tmpDir) + return err + }), nil +} + +func readDirContents(root string) ([]string, error) { + var files []string + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == root { + return nil + } + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + if info.IsDir() { + rel = rel + "/" + } + files = append(files, rel) + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/docker/docker/pkg/archive/utils_test.go index f5cacea8..98719032 100644 --- a/vendor/github.com/docker/docker/pkg/archive/utils_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/utils_test.go @@ -16,7 +16,7 @@ var testUntarFns = map[string]func(string, io.Reader) error{ return Untar(r, dest, nil) }, "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, ArchiveReader(r)) + _, err := ApplyLayer(dest, Reader(r)) return err }, } diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 00000000..3d9c3132 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file.
Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 3eaf7f89..08b9840c 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -143,17 +142,6 @@ func CopyFile(src, dst string) (int64, error) { return io.Copy(df, sf) } -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} - // ReadSymlinkedDirectory returns the target directory of a symlink. // The target of the symbolic link may not be a file. func ReadSymlinkedDirectory(path string) (string, error) { diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000..d5c3abf5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000..5ec21cac --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go index f1e5dcd1..d7dc4387 100644 --- a/vendor/github.com/docker/docker/pkg/httputils/httputils.go +++ b/vendor/github.com/docker/docker/pkg/httputils/httputils.go @@ -10,7 +10,12 @@ import ( "github.com/docker/docker/pkg/jsonmessage" ) -// Download requests a given URL and returns an io.Reader +var ( + headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) + errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") +) + +// Download requests a given URL and returns an io.Reader. 
func Download(url string) (resp *http.Response, err error) { if resp, err = http.Get(url); err != nil { return nil, err @@ -21,7 +26,7 @@ func Download(url string) (resp *http.Response, err error) { return resp, nil } -// NewHTTPRequestError returns a JSON response error +// NewHTTPRequestError returns a JSON response error. func NewHTTPRequestError(msg string, res *http.Response) error { return &jsonmessage.JSONError{ Message: msg, @@ -29,30 +34,23 @@ func NewHTTPRequestError(msg string, res *http.Response) error { } } +// ServerHeader contains the server information. type ServerHeader struct { App string // docker Ver string // 1.8.0-dev OS string // windows or linux } -// parseServerHeader extracts pieces from am HTTP server header -// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows) +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). func ParseServerHeader(hdr string) (*ServerHeader, error) { - re := regexp.MustCompile(`.*\((.+)\).*$`) - r := &ServerHeader{} - if matches := re.FindStringSubmatch(hdr); matches != nil { - r.OS = matches[1] - parts := strings.Split(hdr, "/") - if len(parts) != 2 { - return nil, errors.New("Bad header: '/' missing") - } - r.App = parts[0] - v := strings.Split(parts[1], " ") - if len(v) != 2 { - return nil, errors.New("Bad header: Expected single space") - } - r.Ver = v[0] - return r, nil + matches := headerRegexp.FindStringSubmatch(hdr) + if len(matches) != 4 { + return nil, errInvalidHeader } - return nil, errors.New("Bad header: Failed regex match") + return &ServerHeader{ + App: strings.TrimSpace(matches[1]), + Ver: strings.TrimSpace(matches[2]), + OS: strings.TrimSpace(matches[3]), + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go new file mode 100644 index 00000000..d35d0821 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go @@ -0,0 +1,115 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestDownload(t *testing.T) { + expected := "Hello, docker !" 
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, expected) + })) + defer ts.Close() + response, err := Download(ts.URL) + if err != nil { + t.Fatal(err) + } + + actual, err := ioutil.ReadAll(response.Body) + response.Body.Close() + + if err != nil || string(actual) != expected { + t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) + } +} + +func TestDownload400Errors(t *testing.T) { + expectedError := "Got HTTP status code >= 400: 403 Forbidden" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, "something failed (forbidden)", http.StatusForbidden) + })) + defer ts.Close() + // Expected status code = 403 + if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { + t.Fatalf("Expected the error %q, got %v", expectedError, err) + } +} + +func TestDownloadOtherErrors(t *testing.T) { + if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { + t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) + } +} + +func TestNewHTTPRequestError(t *testing.T) { + errorMessage := "Some error message" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, errorMessage, http.StatusForbidden) + })) + defer ts.Close() + httpResponse, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { + t.Fatalf("Expected err to be %q, got %v", errorMessage, err) + } +} + +func TestParseServerHeader(t *testing.T) { + inputs := map[string][]string{ + "bad header": {"error"}, + "(bad header)": {"error"}, + "(without/spaces)": {"error"}, + "(header/with spaces)": {"error"}, + "foo/bar (baz)": {"foo", "bar", "baz"}, + "foo/bar": {"error"}, + "foo": {"error"}, + "foo/bar (baz space)": {"foo", "bar", "baz space"}, + " f f / b b ( b s ) ": {"f f", "b b", "b s"}, + "foo/bar (baz) ignore": {"foo", "bar", "baz"}, + "foo/bar ()": {"error"}, + "foo/bar()": {"error"}, + "foo/bar(baz)": {"foo", "bar", "baz"}, + "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, + "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, + "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, + } + + for header, values := range inputs { + serverHeader, err := ParseServerHeader(header) + if err != nil { + if err != errInvalidHeader { + t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) + } + if values[0] == "error" { + continue + } + t.Fatalf("Header %q failed to parse when it shouldn't have", header) + } + if values[0] == "error" { + t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) + } + + if serverHeader.App != values[0] { + t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) + } + + if serverHeader.Ver != values[1] { + t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) + } + + if serverHeader.OS != values[2] { + t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) + } + + } + +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go index 5d1aee40..d5cf34e4 100644 --- a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go +++ 
b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go @@ -5,6 +5,7 @@ import ( "net/http" ) +// MimeTypes stores the MIME content type. var MimeTypes = struct { TextPlain string Tar string diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go new file mode 100644 index 00000000..9de433ee --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go @@ -0,0 +1,13 @@ +package httputils + +import ( + "testing" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { + t.Errorf("TestDetectContentType failed") + } +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go index 35338600..e9d05783 100644 --- a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go +++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go @@ -2,6 +2,7 @@ package httputils import ( "fmt" + "io" "io/ioutil" "net/http" "net/http/httptest" @@ -9,6 +10,229 @@ import ( "testing" ) +func TestResumableRequestHeaderSimpleErrors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, world !") + })) + defer ts.Close() + + client := &http.Client{} + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedError := "client and request can't be nil\n" + resreq := &resumableRequestReader{} + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + + resreq = &resumableRequestReader{ + client: client, + request: req, + totalSize: -1, + } + expectedError = "failed to auto detect content length" + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + +} + +// Not too many failures; bails out after some wait +func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 2, + } + read, err := resreq.Read([]byte{}) + if err != nil || read != 0 { + t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) + } +} + +// Too many failures; returns the error +func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 1, + } + defer resreq.Close() + + expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` + read, err := resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError || read != 0 { + t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) + } +} + +type errorReaderCloser struct{} + +func (errorReaderCloser) Close() error { return nil } + +func (errorReaderCloser) Read(p []byte) 
(n int, err error) { + return 0, fmt.Errorf("An error occurred") +} + +// If an unknown error is encountered, return 0, nil and log it +func TestResumableRequestReaderWithReadError(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "500 Internal Server", + StatusCode: 500, + ContentLength: 0, + Close: true, + Body: errorReaderCloser{}, + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + read, err := resreq.Read(buf) + if err != nil { + t.Fatal(err) + } + + if read != 0 { + t.Fatalf("Expected to have read nothing, but read %v", read) + } +} + +func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "416 Requested Range Not Satisfiable", + StatusCode: 416, + ContentLength: 0, + Close: true, + Body: ioutil.NopCloser(strings.NewReader("")), + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + _, err = resreq.Read(buf) + if err == nil || err != io.EOF { + t.Fatalf("Expected an io.EOF error, got %v", err) + } +} + +func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Range") == "" { + t.Fatalf("Expected a Range HTTP header, got nothing") + } + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + resreq := &resumableRequestReader{ + client: client, + request: req, + lastRange: 1, + } + defer resreq.Close() + + buf := make([]byte, 2) + _, err = resreq.Read(buf) + if err == nil || err.Error() != "the server doesn't support byte ranges" { + t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) + } +} + +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + + resreq := ResumableRequestReader(client, req, retries, 0) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + func TestResumableRequestReader(t *testing.T) { srvtxt := "some response text data" diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 00000000..a1301ee9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,195 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping.
An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (including any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (including any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func ToHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// CreateIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, nil, err + } + if len(subuidRanges) == 0 { + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username { + // return the first entry for a user; ignores potential for multiple ranges per user + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 00000000..b57d6ef1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package idtools + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/system" +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up 
to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go new file mode 100644 index 00000000..55b338c9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go @@ -0,0 +1,243 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +type node struct { + uid int + gid int +} + +func TestMkdirAllAs(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirall") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should be chowned, but nothing else + if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + 
t.Fatal(err) + } +} + +func TestMkdirAllNewAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdirnew") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should NOT be chowned + if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdir") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + } + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should just chown to the requested uid/gid + if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // create a subdir under a dir which doesn't exist--should fail + if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") + } + + // create a subdir under an existing dir; should only change the ownership of the new subdir + if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr/bin"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func buildTree(base string, tree map[string]node) error { + for path, node := range tree { + fullPath := filepath.Join(base, path) + if err := os.MkdirAll(fullPath, 0755); err != nil { + return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) + } + if err := os.Chown(fullPath, node.uid, node.gid); err != nil { + return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) + } + } + return nil +} + +func readTree(base, root string) (map[string]node, error) { + tree := make(map[string]node) + + dirInfos, err := 
ioutil.ReadDir(base) + if err != nil { + return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) + } + + for _, info := range dirInfos { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) + } + tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} + if info.IsDir() { + // read the subdirectory + subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) + if err != nil { + return nil, err + } + for path, nodeinfo := range subtree { + tree[path] = nodeinfo + } + } + } + return tree, nil +} + +func compareTrees(left, right map[string]node) error { + if len(left) != len(right) { + return fmt.Errorf("Trees aren't the same size") + } + for path, nodeLeft := range left { + if nodeRight, ok := right[path]; ok { + if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { + // mismatch + return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, + nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) + } + continue + } + return fmt.Errorf("right tree didn't contain path %q", path) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000..c9e3c937 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000..c1eedff1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,155 @@ +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" + "syscall" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --uid <id> --shell /bin/login --no-create-home --disabled-login --ingroup <group> <name> +// useradd -M -u <id> -s /bin/nologin -N -g <group> <name> +// addgroup --gid <id> <group> +// groupadd -g <id> <group> + +const baseUID int = 10000 +const baseGID int = 10000 +const idMAX int = 65534 + +var ( + userCommand string + groupCommand string + + cmdTemplates = map[string]string{ + "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", + "useradd": "-M -u %d -s /bin/false -N -g %s %s", + "addgroup": "--gid %d %s", + "groupadd": "-g %d %s", + } +) + +func init() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + if _, err := resolveBinary("addgroup"); err == nil { + groupCommand = "addgroup" + } else if _, err := resolveBinary("groupadd"); err == nil { + groupCommand = "groupadd" + } +} + +func resolveBinary(binname string) (string, error) { + binaryPath, err := 
exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +// This new user's /etc/sub{uid,gid} ranges will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + // Find unused uid, gid pair + uid, err := findUnusedUID(baseUID) + if err != nil { + return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) + } + gid, err := findUnusedGID(baseGID) + if err != nil { + return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) + } + + // First add the group that we will use + if err := addGroup(name, gid); err != nil { + return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) + } + // Add the user as a member of the group + if err := addUser(name, uid, name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + return uid, gid, nil +} + +func addUser(userName string, uid int, groupName string) error { + + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) + return execAddCmd(userCommand, args) +} + +func addGroup(groupName string, gid int) error { + + if groupCommand == "" { + return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") + } + args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) + // only error out if the error isn't that the group already exists + // if the group exists then our needs are already met + if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { + return err + } + return nil +} + +func execAddCmd(cmd, args string) error { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
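// To make the template expansion concrete (hypothetical values, not from the
// patch): with userCommand == "useradd", uid 10000 and group/user name
// "dremap", addUser formats cmdTemplates["useradd"] into the args string
//
//	-M -u 10000 -s /bin/false -N -g dremap dremap
//
// and strings.Split above turns that string into the argv handed to exec.Command.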
+ out, err := execCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) + } + return nil +} + +func findUnusedUID(startUID int) (int, error) { + return findUnused("passwd", startUID) +} + +func findUnusedGID(startGID int) (int, error) { + return findUnused("group", startGID) +} + +func findUnused(file string, id int) (int, error) { + for { + cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) + cmd := exec.Command("sh", "-c", cmdStr) + if err := cmd.Run(); err != nil { + // if a non-zero return code occurs, then we know the ID was not found + // and is usable + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + if status.ExitStatus() == 1 { + //no match, we can use this ID + return id, nil + } + } + } + return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) + } + id++ + if id > idMAX { + return -1, fmt.Errorf("Maximum id in %q reached while finding an unused numeric ID", file) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000..d98b354c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 00000000..932e1d1b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,89 @@ +package ioutils + +const maxCap = 1e6 + +// BytesPipe is an io.ReadWriter which works similarly to a pipe (queue): +// all written data can be read only once. BytesPipe also allocates and +// releases new byte slices to adjust to current needs, so the buffer won't +// stay overgrown after a high-load peak. +// BytesPipe isn't goroutine-safe; the caller must synchronize it if needed. +type BytesPipe struct { + buf [][]byte // slice of byte-slices of buffered data + lastRead int // index in the first slice to a read point + bufLen int // length of data buffered over the slices +} + +// NewBytesPipe creates a new BytesPipe, initialized by the specified slice. +// If buf is nil, then it will be initialized with a slice whose cap is 64. +// buf will be adjusted so that len(buf) == 0 while its capacity is kept. +func NewBytesPipe(buf []byte) *BytesPipe { + if cap(buf) == 0 { + buf = make([]byte, 0, 64) + } + return &BytesPipe{ + buf: [][]byte{buf[:0]}, + } +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in the process of writing. 
+func (bp *BytesPipe) Write(p []byte) (n int, err error) { + for { + // write data to the last buffer + b := bp.buf[len(bp.buf)-1] + // copy data to the current empty allocated area + n := copy(b[len(b):cap(b)], p) + // increment buffered data length + bp.bufLen += n + // include written data in last buffer + bp.buf[len(bp.buf)-1] = b[:len(b)+n] + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + // allocate slice that has twice the size of the last unless maximum reached + nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) + if maxCap < nextCap { + nextCap = maxCap + } + // add new byte slice to the buffers slice and continue writing + bp.buf = append(bp.buf, make([]byte, 0, nextCap)) + } + return +} + +func (bp *BytesPipe) len() int { + return bp.bufLen - bp.lastRead +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + for { + read := copy(p, bp.buf[0][bp.lastRead:]) + n += read + bp.lastRead += read + if bp.len() == 0 { + // we have read everything. reset to the beginning. + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = bp.buf[0][:0] + break + } + // break if everything was read + if len(p) == read { + break + } + // more buffered data and more asked. read from next slice. + p = p[read:] + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = nil // throw away old slice + bp.buf = bp.buf[1:] // switch to next + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go new file mode 100644 index 00000000..62b1a186 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go @@ -0,0 +1,141 @@ +package ioutils + +import ( + "crypto/sha1" + "encoding/hex" + "testing" +) + +func TestBytesPipeRead(t *testing.T) { + buf := NewBytesPipe(nil) + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + rd := make([]byte, 4) + n, err := buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "1234" { + t.Fatalf("Read %s, but must be %s", rd, "1234") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "5678" { + t.Fatalf("Read %s, but must be %s", rd, "5678") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) + } + if string(rd[:n]) != "90" { + t.Fatalf("Read %s, but must be %s", rd, "90") + } +} + +func TestBytesPipeWrite(t *testing.T) { + buf := NewBytesPipe(nil) + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + if string(buf.buf[0]) != "1234567890" { + t.Fatalf("Buffer %s, must be %s", buf.buf, "1234567890") + } +} + +// Write and read at different speeds/chunk sizes and check valid data is read. 
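// A short usage sketch of the FIFO semantics the tests above pin down
// (sizes and strings are illustrative only); data is gone once it is read:
//
//	bp := NewBytesPipe(nil)
//	bp.Write([]byte("hello"))
//	buf := make([]byte, 3)
//	n, _ := bp.Read(buf) // n == 3, buf[:n] == "hel"
//	n, _ = bp.Read(buf)  // n == 2, buf[:n] == "lo"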
+func TestBytesPipeWriteRandomChunks(t *testing.T) { + cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ + {100, 10, 1}, + {1000, 10, 5}, + {1000, 100, 0}, + {1000, 5, 6}, + {10000, 50, 25}, + } + + testMessage := []byte("this is a random string for testing") + // random slice sizes to read and write + writeChunks := []int{25, 35, 15, 20} + readChunks := []int{5, 45, 20, 25} + + for _, c := range cases { + // first pass: write directly to hash + hash := sha1.New() + for i := 0; i < c.iterations*c.writesPerLoop; i++ { + if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { + t.Fatal(err) + } + } + expected := hex.EncodeToString(hash.Sum(nil)) + + // write/read through buffer + buf := NewBytesPipe(nil) + hash.Reset() + for i := 0; i < c.iterations; i++ { + for w := 0; w < c.writesPerLoop; w++ { + buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) + } + for r := 0; r < c.readsPerLoop; r++ { + p := make([]byte, readChunks[(i*c.readsPerLoop+r)%len(readChunks)]) + n, _ := buf.Read(p) + hash.Write(p[:n]) + } + } + // read rest of the data from buffer + for i := 0; ; i++ { + p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) + n, _ := buf.Read(p) + if n == 0 { + break + } + hash.Write(p[:n]) + } + actual := hex.EncodeToString(hash.Sum(nil)) + + if expected != actual { + t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) + } + + } +} + +func BenchmarkBytesPipeWrite(b *testing.B) { + for i := 0; i < b.N; i++ { + buf := NewBytesPipe(nil) + for j := 0; j < 1000; j++ { + buf.Write([]byte("pretty short line, because why not?")) + } + } +} + +func BenchmarkBytesPipeRead(b *testing.B) { + rd := make([]byte, 1024) + for i := 0; i < b.N; i++ { + b.StopTimer() + buf := NewBytesPipe(nil) + for j := 0; j < 1000; j++ { + buf.Write(make([]byte, 1024)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + if n, _ := buf.Read(rd); n != 1024 { + b.Fatalf("Wrong number of bytes: %d", n) + } + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go index 801132ff..0b04b0ba 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go @@ -12,3 +12,11 @@ func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { } return 0, nil } + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go index f231aa9d..0d2d76b4 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go @@ -53,7 +53,7 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { } if rdrOffset == s && i != len(r.readers)-1 { - idx += 1 + idx++ rdrOffset = 0 } r.pos = &pos{idx, rdrOffset} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index ff09baad..54dd312b 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -1,14 +1,10 @@ package ioutils import ( - "bytes" - "crypto/rand" "crypto/sha256" "encoding/hex" "io" - "math/big" "sync" - "time" ) type 
readCloserWrapper struct { @@ -20,6 +16,7 @@ func (r *readCloserWrapper) Close() error { return r.closer() } +// NewReadCloserWrapper returns a new io.ReadCloser. func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { return &readCloserWrapper{ Reader: r, @@ -40,6 +37,7 @@ func (r *readerErrWrapper) Read(p []byte) (int, error) { return n, err } +// NewReaderErrWrapper returns a new io.Reader. func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { return &readerErrWrapper{ reader: r, @@ -53,41 +51,27 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { // expanding buffer. type bufReader struct { sync.Mutex - buf *bytes.Buffer - reader io.Reader - err error - wait sync.Cond - drainBuf []byte - reuseBuf []byte - maxReuse int64 - resetTimeout time.Duration - bufLenResetThreshold int64 - maxReadDataReset int64 + buf io.ReadWriter + reader io.Reader + err error + wait sync.Cond + drainBuf []byte } -func NewBufReader(r io.Reader) *bufReader { - var timeout int - if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil { - timeout = int(randVal.Int64()) + 180 - } else { - timeout = 300 - } +// NewBufReader returns a new bufReader. +func NewBufReader(r io.Reader) io.ReadCloser { reader := &bufReader{ - buf: &bytes.Buffer{}, - drainBuf: make([]byte, 1024), - reuseBuf: make([]byte, 4096), - maxReuse: 1000, - resetTimeout: time.Second * time.Duration(timeout), - bufLenResetThreshold: 100 * 1024, - maxReadDataReset: 10 * 1024 * 1024, - reader: r, + buf: NewBytesPipe(nil), + reader: r, + drainBuf: make([]byte, 1024), } reader.wait.L = &reader.Mutex go reader.drain() return reader } -func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { +// NewBufReaderWithDrainbufAndBuffer returns a BufReader with drainBuffer and buffer. +func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer io.ReadWriter) io.ReadCloser { reader := &bufReader{ buf: buffer, drainBuf: drainBuffer, @@ -99,97 +83,24 @@ func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer * } func (r *bufReader) drain() { - var ( - duration time.Duration - lastReset time.Time - now time.Time - reset bool - bufLen int64 - dataSinceReset int64 - maxBufLen int64 - reuseBufLen int64 - reuseCount int64 - ) - reuseBufLen = int64(len(r.reuseBuf)) - lastReset = time.Now() for { + //Call to scheduler is made to yield from this goroutine. + //This avoids goroutine looping here when n=0,err=nil, fixes code hangs when run with GCC Go. + callSchedulerIfNecessary() n, err := r.reader.Read(r.drainBuf) - dataSinceReset += int64(n) r.Lock() - bufLen = int64(r.buf.Len()) - if bufLen > maxBufLen { - maxBufLen = bufLen - } - - // Avoid unbounded growth of the buffer over time. - // This has been discovered to be the only non-intrusive - // solution to the unbounded growth of the buffer. - // Alternative solutions such as compression, multiple - // buffers, channels and other similar pieces of code - // were reducing throughput, overall Docker performance - // or simply crashed Docker. - // This solution releases the buffer when specific - // conditions are met to avoid the continuous resizing - // of the buffer for long lived containers. 
-		//
-		// Move data to the front of the buffer if it's
-		// smaller than what reuseBuf can store
-		if bufLen > 0 && reuseBufLen >= bufLen {
-			n, _ := r.buf.Read(r.reuseBuf)
-			r.buf.Write(r.reuseBuf[0:n])
-			// Take action if the buffer has been reused too many
-			// times and if there's data in the buffer.
-			// The timeout is also used as means to avoid doing
-			// these operations more often or less often than
-			// required.
-			// The various conditions try to detect heavy activity
-			// in the buffer which might be indicators of heavy
-			// growth of the buffer.
-		} else if reuseCount >= r.maxReuse && bufLen > 0 {
-			now = time.Now()
-			duration = now.Sub(lastReset)
-			timeoutReached := duration >= r.resetTimeout
-
-			// The timeout has been reached and the
-			// buffered data couldn't be moved to the front
-			// of the buffer, so the buffer gets reset.
-			if timeoutReached && bufLen > reuseBufLen {
-				reset = true
-			}
-			// The amount of buffered data is too high now,
-			// reset the buffer.
-			if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
-				reset = true
-			}
-			// Reset the buffer if a certain amount of
-			// data has gone through the buffer since the
-			// last reset.
-			if timeoutReached && dataSinceReset >= r.maxReadDataReset {
-				reset = true
-			}
-			// The buffered data is moved to a fresh buffer,
-			// swap the old buffer with the new one and
-			// reset all counters.
-			if reset {
-				newbuf := &bytes.Buffer{}
-				newbuf.ReadFrom(r.buf)
-				r.buf = newbuf
-				lastReset = now
-				reset = false
-				dataSinceReset = 0
-				maxBufLen = 0
-				reuseCount = 0
-			}
-		}
 		if err != nil {
 			r.err = err
 		} else {
-			r.buf.Write(r.drainBuf[0:n])
+			if n == 0 {
+				// nothing written, no need to signal
+				r.Unlock()
+				continue
+			}
+			r.buf.Write(r.drainBuf[:n])
 		}
-		reuseCount++
 		r.wait.Signal()
 		r.Unlock()
-		callSchedulerIfNecessary()
 		if err != nil {
 			break
 		}
@@ -211,6 +122,7 @@ func (r *bufReader) Read(p []byte) (n int, err error) {
 	}
 }
 
+// Close closes the bufReader.
 func (r *bufReader) Close() error {
 	closer, ok := r.reader.(io.ReadCloser)
 	if !ok {
@@ -219,6 +131,7 @@ func (r *bufReader) Close() error {
 	return closer.Close()
 }
 
+// HashData returns the sha256 sum of src.
 func HashData(src io.Reader) (string, error) {
 	h := sha256.New()
 	if _, err := io.Copy(h, src); err != nil {
@@ -227,6 +140,8 @@ func HashData(src io.Reader) (string, error) {
 	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
 }
 
+// OnEOFReader wraps an io.ReadCloser and a function; the function
+// runs when the reader reaches end-of-file or is closed.
 type OnEOFReader struct {
 	Rc io.ReadCloser
 	Fn func()
@@ -240,6 +155,7 @@ func (r *OnEOFReader) Read(p []byte) (n int, err error) {
 	return
 }
 
+// Close closes the underlying reader and runs the function.
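+//
+// Editorial illustration, not part of the upstream patch: a typical use is
+// releasing a resource once a stream has been fully read. The names below
+// (resp, conn) are hypothetical:
+//
+//	r := &OnEOFReader{Rc: resp.Body, Fn: func() { conn.Close() }}
+//	_, _ = io.Copy(ioutil.Discard, r) // Fn runs on EOF (or on r.Close())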
 func (r *OnEOFReader) Close() error {
 	err := r.Rc.Close()
 	r.runFunc()
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go
index 0a39b6ec..5c26a2a1 100644
--- a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go
@@ -7,6 +7,7 @@ import (
 	"io/ioutil"
 	"strings"
 	"testing"
+	"time"
 )
 
 // Implement io.Reader
@@ -61,8 +62,8 @@ func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) {
 	reader, writer := io.Pipe()
 
 	drainBuffer := make([]byte, 1024)
-	buffer := bytes.Buffer{}
-	bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer)
+	buffer := NewBytesPipe(nil)
+	bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, buffer)
 
 	// Write everything down to a Pipe
 	// Usually, a pipe should block but because of the buffered reader,
@@ -76,7 +77,11 @@
 
 	// Drain the reader *after* everything has been written, just to verify
 	// it is indeed buffering
-	<-done
+	select {
+	case <-done:
+	case <-time.After(1 * time.Second):
+		t.Fatal("timeout")
+	}
 
 	output, err := ioutil.ReadAll(bufreader)
 	if err != nil {
@@ -124,13 +129,16 @@ func TestBufReaderCloseWithNonReaderCloser(t *testing.T) {
 }
 
 // implements io.ReadCloser
-type simpleReaderCloser struct{}
+type simpleReaderCloser struct {
+	err error
+}
 
 func (r *simpleReaderCloser) Read(p []byte) (n int, err error) {
-	return 0, nil
+	return 0, r.err
 }
 
 func (r *simpleReaderCloser) Close() error {
+	r.err = io.EOF
 	return nil
 }
 
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 00000000..1539ad21
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+	return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 00000000..c258e5fd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+	"io/ioutil"
+
+	"github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+	tempDir, err := ioutil.TempDir(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
index 25095474..2b35a266 100644
--- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -1,41 +1,86 @@
 package ioutils
 
 import (
+	"errors"
 	"io"
 	"net/http"
 	"sync"
 )
 
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
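+//
+// Editorial sketch, not part of the upstream patch: a WriteFlusher typically
+// wraps an http.ResponseWriter so that each chunk of progress output reaches
+// the client immediately:
+//
+//	wf := NewWriteFlusher(w) // w is an http.ResponseWriter
+//	defer wf.Close()
+//	fmt.Fprintln(wf, "pulling layer...") // written and flushed in one call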
 type WriteFlusher struct {
-	sync.Mutex
+	mu      sync.Mutex
 	w       io.Writer
 	flusher http.Flusher
 	flushed bool
+	closed  error
+
+	// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
+	// channel will allow one to properly order the operations.
 }
 
+var errWriteFlusherClosed = errors.New("writeflusher: closed")
+
 func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
-	wf.Lock()
-	defer wf.Unlock()
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+	if wf.closed != nil {
+		return 0, wf.closed
+	}
+
 	n, err = wf.w.Write(b)
-	wf.flushed = true
-	wf.flusher.Flush()
+	wf.flush() // every write is a flush.
 	return n, err
 }
 
 // Flush the stream immediately.
 func (wf *WriteFlusher) Flush() {
-	wf.Lock()
-	defer wf.Unlock()
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+
+	wf.flush()
+}
+
+// flush the stream immediately without taking a lock. Used internally.
+func (wf *WriteFlusher) flush() {
+	if wf.closed != nil {
+		return
+	}
+
 	wf.flushed = true
 	wf.flusher.Flush()
 }
 
+// Flushed reports whether the stream has been flushed:
+// true if it has, false otherwise.
 func (wf *WriteFlusher) Flushed() bool {
-	wf.Lock()
-	defer wf.Unlock()
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+
 	return wf.flushed
 }
 
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.mu.Lock()
+	defer wf.mu.Unlock()
+
+	if wf.closed != nil {
+		return wf.closed
+	}
+
+	wf.closed = errWriteFlusherClosed
+	return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
 func NewWriteFlusher(w io.Writer) *WriteFlusher {
 	var flusher http.Flusher
 	if f, ok := w.(http.Flusher); ok {
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
index 43fdc44e..7a3249f3 100644
--- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -2,6 +2,7 @@ package ioutils
 
 import "io"
 
+// NopWriter represents a type whose Write operation is a no-op.
 type NopWriter struct{}
 
 func (*NopWriter) Write(buf []byte) (int, error) {
@@ -14,12 +15,15 @@ type nopWriteCloser struct {
 
 func (w *nopWriteCloser) Close() error { return nil }
 
+// NopWriteCloser returns a nopWriteCloser.
 func NopWriteCloser(w io.Writer) io.WriteCloser {
 	return &nopWriteCloser{w}
 }
 
+// NopFlusher represents a type whose Flush operation is a no-op.
 type NopFlusher struct{}
 
+// Flush is a no-op operation.
 func (f *NopFlusher) Flush() {}
 
 type writeCloserWrapper struct {
@@ -31,6 +35,7 @@ func (r *writeCloserWrapper) Close() error {
 	return r.closer()
 }
 
+// NewWriteCloserWrapper returns a new io.WriteCloser.
 func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
 	return &writeCloserWrapper{
 		Writer: r,
@@ -38,7 +43,7 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
 	}
 }
 
-// Wrap a concrete io.Writer and hold a count of the number
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
 // of bytes written to the writer during a "session".
 // This can be convenient when write return is masked
 // (e.g., json.Encoder.Encode())
@@ -47,6 +52,7 @@ type WriteCounter struct {
 	Count  int64
 	Writer io.Writer
 }
 
+// NewWriteCounter returns a new WriteCounter.
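+//
+// Editorial sketch, not part of the upstream patch: WriteCounter recovers a
+// byte count that the callee hides, e.g. with json.Encoder:
+//
+//	wc := NewWriteCounter(ioutil.Discard)
+//	_ = json.NewEncoder(wc).Encode(v) // v is any encodable value
+//	fmt.Println(wc.Count)             // total bytes written by Encode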
 func NewWriteCounter(w io.Writer) *WriteCounter {
 	return &WriteCounter{
 		Writer: w,
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
index 7db1626e..451c6a9f 100644
--- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -12,6 +12,8 @@ import (
 	"github.com/docker/docker/pkg/units"
 )
 
+// JSONError wraps a concrete Code and Message: `Code` is
+// an integer error code, `Message` is the error message.
 type JSONError struct {
 	Code    int    `json:"code,omitempty"`
 	Message string `json:"message,omitempty"`
@@ -21,10 +23,14 @@ func (e *JSONError) Error() string {
 	return e.Message
 }
 
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
 type JSONProgress struct {
 	terminalFd uintptr
-	Current    int   `json:"current,omitempty"`
-	Total      int   `json:"total,omitempty"`
+	Current    int64 `json:"current,omitempty"`
+	Total      int64 `json:"total,omitempty"`
 	Start      int64 `json:"start,omitempty"`
 }
 
@@ -61,10 +67,16 @@ func (p *JSONProgress) String() string {
 		}
 		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
 	}
+	numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+	if p.Current > p.Total {
+		// remove total display if the reported current is wonky.
+		numbersBox = fmt.Sprintf("%8v", current)
+	}
 	if p.Current > 0 && p.Start > 0 && percentage < 50 {
-		fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
+		fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
 		perEntry := fromStart / time.Duration(p.Current)
 		left := time.Duration(p.Total-p.Current) * perEntry
 		left = (left / time.Second) * time.Second
@@ -76,6 +88,9 @@ func (p *JSONProgress) String() string {
 	return pbBox + numbersBox + timeLeftBox
 }
 
+// JSONMessage defines a message struct. It describes
+// the creation time, origin, status, and ID of the
+// message. It's used for docker events.
 type JSONMessage struct {
 	Stream string `json:"stream,omitempty"`
 	Status string `json:"status,omitempty"`
@@ -84,10 +99,14 @@ type JSONMessage struct {
 	ID           string     `json:"id,omitempty"`
 	From         string     `json:"from,omitempty"`
 	Time         int64      `json:"time,omitempty"`
+	TimeNano     int64      `json:"timeNano,omitempty"`
 	Error        *JSONError `json:"errorDetail,omitempty"`
 	ErrorMessage string     `json:"error,omitempty"` //deprecated
 }
 
+// Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
+// is a terminal. If this is the case, it will erase the entire current line
+// when displaying the progressbar.
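+//
+// Editorial note, not part of the upstream patch: a stream entry such as
+//
+//	{"id":"deadbeef","status":"Downloading"}
+//
+// renders as "deadbeef: Downloading"; a progress bar is redrawn in place on
+// a terminal and suppressed on non-terminal output.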
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { @@ -103,7 +122,9 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } - if jm.Time != 0 { + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(timeutils.RFC3339NanoFixed)) + } else if jm.Time != 0 { fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) } if jm.ID != "" { @@ -124,6 +145,9 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { return nil } +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error { var ( dec = json.NewDecoder(in) diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go index 2e78fa7e..7f46a8f6 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go @@ -3,12 +3,12 @@ package jsonmessage import ( "bytes" "fmt" + "strings" "testing" "time" "github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/timeutils" - "strings" ) func TestError(t *testing.T) { @@ -45,7 +45,7 @@ func TestProgress(t *testing.T) { } // this number can't be negative gh#7136 - expected = "[==================================================>] 50 B/40 B" + expected = "[==================================================>] 50 B" jp5 := JSONProgress{Current: 50, Total: 40} if jp5.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp5.String()) @@ -53,7 +53,7 @@ func TestProgress(t *testing.T) { } func TestJSONMessageDisplay(t *testing.T) { - now := time.Now().Unix() + now := time.Now() messages := map[JSONMessage][]string{ // Empty JSONMessage{}: {"\n", "\n"}, @@ -66,13 +66,34 @@ func TestJSONMessageDisplay(t *testing.T) { }, // General JSONMessage{ - Time: now, + Time: now.Unix(), ID: "ID", From: "From", Status: "status", }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now, 0).Format(timeutils.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now, 0).Format(timeutils.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(timeutils.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(timeutils.RFC3339NanoFixed)), + }, + // General, with nano precision time + JSONMessage{ + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), + }, + // General, with both times Nano is preferred + JSONMessage{ + Time: now.Unix(), + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(timeutils.RFC3339NanoFixed)), }, // 
Stream over status JSONMessage{ diff --git a/vendor/github.com/docker/docker/pkg/mflag/flag.go b/vendor/github.com/docker/docker/pkg/mflag/flag.go index fa8b0458..43fd3051 100644 --- a/vendor/github.com/docker/docker/pkg/mflag/flag.go +++ b/vendor/github.com/docker/docker/pkg/mflag/flag.go @@ -200,6 +200,24 @@ func (i *uint64Value) Get() interface{} { return uint64(*i) } func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } +// -- uint16 Value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} + +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Get() interface{} { return uint16(*i) } + +func (i *uint16Value) String() string { return fmt.Sprintf("%v", *i) } + // -- string Value type stringValue string @@ -571,7 +589,7 @@ var Usage = func() { PrintDefaults() } -// Usage prints to standard error a usage message documenting the standard command layout +// ShortUsage prints to standard error a usage message documenting the standard command layout // The function is a variable that may be changed to point to a custom function. var ShortUsage = func() { fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) @@ -757,6 +775,32 @@ func Uint64(names []string, value uint64, usage string) *uint64 { return CommandLine.Uint64(names, value, usage) } +// Uint16Var defines a uint16 flag with specified name, default value, and usage string. +// The argument p points to a uint16 variable in which to store the value of the flag. +func (fs *FlagSet) Uint16Var(p *uint16, names []string, value uint16, usage string) { + fs.Var(newUint16Value(value, p), names, usage) +} + +// Uint16Var defines a uint16 flag with specified name, default value, and usage string. +// The argument p points to a uint16 variable in which to store the value of the flag. +func Uint16Var(p *uint16, names []string, value uint16, usage string) { + CommandLine.Var(newUint16Value(value, p), names, usage) +} + +// Uint16 defines a uint16 flag with specified name, default value, and usage string. +// The return value is the address of a uint16 variable that stores the value of the flag. +func (fs *FlagSet) Uint16(names []string, value uint16, usage string) *uint16 { + p := new(uint16) + fs.Uint16Var(p, names, value, usage) + return p +} + +// Uint16 defines a uint16 flag with specified name, default value, and usage string. +// The return value is the address of a uint16 variable that stores the value of the flag. +func Uint16(names []string, value uint16, usage string) *uint16 { + return CommandLine.Uint16(names, value, usage) +} + // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. 
func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { @@ -1058,7 +1102,7 @@ func (fs *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - os.Exit(2) + os.Exit(125) case PanicOnError: panic(err) } @@ -1094,7 +1138,7 @@ func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { } // ReportError is a utility method that prints a user-friendly message -// containing the error that occured during parsing and a suggestion to get help +// containing the error that occurred during parsing and a suggestion to get help func (fs *FlagSet) ReportError(str string, withHelp bool) { if withHelp { if os.Args[0] == fs.Name() { diff --git a/vendor/github.com/docker/docker/pkg/nat/nat.go b/vendor/github.com/docker/docker/pkg/nat/nat.go index 1fbb13e6..6595feb0 100644 --- a/vendor/github.com/docker/docker/pkg/nat/nat.go +++ b/vendor/github.com/docker/docker/pkg/nat/nat.go @@ -34,17 +34,20 @@ type PortSet map[Port]struct{} // Port is a string containing port number and protocol in the format "80/tcp" type Port string -// NewPort creates a new instance of a Port given a protocol and port number +// NewPort creates a new instance of a Port given a protocol and port number or port range func NewPort(proto, port string) (Port, error) { // Check for parsing issues on "port" now so we can avoid having // to check it later on. - portInt, err := ParsePort(port) + portStartInt, portEndInt, err := ParsePortRange(port) if err != nil { return "", err } - return Port(fmt.Sprintf("%d/%s", portInt, proto)), nil + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil } // ParsePort parses the port number string and returns an int @@ -59,6 +62,18 @@ func ParsePort(rawPort string) (int, error) { return int(port), nil } +// ParsePortRange parses the port range string and returns start/end ints +func ParsePortRange(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := parsers.ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + // Proto returns the protocol of a Port func (p Port) Proto() string { proto, _ := SplitProtoPort(string(p)) @@ -84,6 +99,11 @@ func (p Port) Int() int { return int(port) } +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRange(p.Port()) +} + // SplitProtoPort splits a port in the format of proto/port func SplitProtoPort(rawPort string) (string, string) { parts := strings.Split(rawPort, "/") @@ -162,7 +182,12 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, } if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. 
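+		// Editorial example, not part of the upstream patch:
+		// "8000-9000:80/tcp" maps the single container port 80 into the
+		// dynamic host range 8000-9000, while "8000-9000:80-81/tcp"
+		// still fails the length check below.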
+ if endPort != startPort { + return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } } if !validateProto(strings.ToLower(proto)) { @@ -174,6 +199,11 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, if len(hostPort) > 0 { hostPort = strconv.FormatUint(startHostPort+i, 10) } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } port, err := NewPort(strings.ToLower(proto), containerPort) if err != nil { return nil, nil, err diff --git a/vendor/github.com/docker/docker/pkg/nat/nat_test.go b/vendor/github.com/docker/docker/pkg/nat/nat_test.go index d9472cc7..2c71142b 100644 --- a/vendor/github.com/docker/docker/pkg/nat/nat_test.go +++ b/vendor/github.com/docker/docker/pkg/nat/nat_test.go @@ -41,6 +41,56 @@ func TestParsePort(t *testing.T) { } } +func TestParsePortRange(t *testing.T) { + var ( + begin int + end int + err error + ) + + type TestRange struct { + Range string + Begin int + End int + } + validRanges := []TestRange{ + {"1234", 1234, 1234}, + {"1234-1234", 1234, 1234}, + {"1234-1235", 1234, 1235}, + {"8000-9000", 8000, 9000}, + {"0", 0, 0}, + {"0-0", 0, 0}, + } + + for _, r := range validRanges { + begin, end, err = ParsePortRange(r.Range) + + if err != nil || begin != r.Begin { + t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin) + } + if err != nil || end != r.End { + t.Fatalf("Parsing port range '%s' did not succeed. Expected end %d, got %d", r.Range, r.End, end) + } + } + + invalidRanges := []string{ + "asdf", + "1asdf", + "9000-8000", + "9000-", + "-8000", + "-8000-", + } + + for _, r := range invalidRanges { + begin, end, err = ParsePortRange(r) + + if err == nil || begin != 0 || end != 0 { + t.Fatalf("Parsing port range '%s' succeeded", r) + } + } +} + func TestPort(t *testing.T) { p, err := NewPort("tcp", "1234") @@ -68,6 +118,20 @@ func TestPort(t *testing.T) { if err == nil { t.Fatal("tcp, asd1234 was supposed to fail") } + + p, err = NewPort("tcp", "1234-1230") + if err == nil { + t.Fatal("tcp, 1234-1230 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1242") + if err != nil { + t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err) + } + + if string(p) != "1234-1242/tcp" { + t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp") + } } func TestSplitProtoPort(t *testing.T) { diff --git a/vendor/github.com/docker/docker/pkg/nat/sort.go b/vendor/github.com/docker/docker/pkg/nat/sort.go index 0a9dd078..1eb0fedd 100644 --- a/vendor/github.com/docker/docker/pkg/nat/sort.go +++ b/vendor/github.com/docker/docker/pkg/nat/sort.go @@ -2,8 +2,9 @@ package nat import ( "sort" - "strconv" "strings" + + "github.com/docker/docker/pkg/parsers" ) type portSorter struct { @@ -88,8 +89,8 @@ func SortPortMap(ports []Port, bindings PortMap) { } } -func toInt(s string) int64 { - i, err := strconv.ParseInt(s, 10, 64) +func toInt(s string) uint64 { + i, _, err := parsers.ParsePortRange(s) if err != nil { i = 0 } diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go similarity index 95% rename from vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go rename to 
vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go index 6a2c2468..dc8c0e30 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go @@ -1,3 +1,5 @@ +// +build !windows + package kernel import ( @@ -19,6 +21,7 @@ func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int } } +// TestParseRelease tests the ParseRelease() function func TestParseRelease(t *testing.T) { assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) @@ -48,6 +51,7 @@ func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { } } +// TestCompareKernelVersion tests the CompareKernelVersion() function func TestCompareKernelVersion(t *testing.T) { assertKernelVersion(t, VersionInfo{Kernel: 3, Major: 8, Minor: 0}, diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go index ca8ea8f0..f6f0a728 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -3,9 +3,14 @@ package operatingsystem import ( + "bufio" "bytes" - "errors" + "fmt" "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" ) var ( @@ -18,15 +23,34 @@ var ( // GetOperatingSystem gets the name of the current operating system. func GetOperatingSystem() (string, error) { - b, err := ioutil.ReadFile(etcOsRelease) + osReleaseFile, err := os.Open(etcOsRelease) if err != nil { return "", err } - if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { - b = b[i+13:] - return string(b[:bytes.IndexByte(b, '"')]), nil + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } } - return "", errors.New("PRETTY_NAME not found") + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil } // IsContainerized returns true if we are running inside a container. 
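(Editorial aside, not part of the patch: after the rewrite above, callers get the unquoted PRETTY_NAME value, or "Linux" when the field is absent. A minimal, hypothetical usage sketch:

	name, err := operatingsystem.GetOperatingSystem()
	if err != nil {
		log.Fatal(err) // e.g. /etc/os-release missing or unreadable
	}
	fmt.Println(name) // "Ubuntu 14.04 LTS", or "Linux" if PRETTY_NAME is unset

The renamed test file below exercises both the quoting errors and this fallback.)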
diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go similarity index 54% rename from vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go rename to vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go index b7d54cbb..1bad093c 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go @@ -1,3 +1,5 @@ +// +build linux freebsd + package operatingsystem import ( @@ -8,9 +10,74 @@ import ( ) func TestGetOperatingSystem(t *testing.T) { - var ( - backup = etcOsRelease - ubuntuTrusty = []byte(`NAME="Ubuntu" + var backup = etcOsRelease + + invalids := []struct { + content string + errorExpected string + }{ + { + `PRETTY_NAME=Source Mage GNU/Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", + }, + { + `PRETTY_NAME="Ubuntu Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME=Ubuntu' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", + }, + } + + valids := []struct { + content string + expected string + }{ + { + `NAME="Ubuntu" +PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`, + "Gentoo/Linux", + }, + { + `NAME="Ubuntu" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian @@ -18,24 +85,28 @@ PRETTY_NAME="Ubuntu 14.04 LTS" VERSION_ID="14.04" HOME_URL="http://www.ubuntu.com/" SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) - gentoo = []byte(`NAME=Gentoo -ID=gentoo -PRETTY_NAME="Gentoo/Linux" -ANSI_COLOR="1;32" -HOME_URL="http://www.gentoo.org/" -SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" -BUG_REPORT_URL="https://bugs.gentoo.org/" -`) - noPrettyName = []byte(`NAME="Ubuntu" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Ubuntu 14.04 LTS", + }, + { + `NAME="Ubuntu" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) - ) +PRETTY_NAME='Ubuntu 14.04 LTS'`, + "Ubuntu 14.04 LTS", + }, + { + `PRETTY_NAME=Source +NAME="Source Mage"`, + "Source", + }, + { + `PRETTY_NAME=Source +PRETTY_NAME="Source Mage"`, + "Source Mage", + }, + } dir := os.TempDir() etcOsRelease = filepath.Join(dir, "etcOsRelease") @@ -45,21 +116,23 @@ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) etcOsRelease = backup }() 
-	for expect, osRelease := range map[string][]byte{
-		"Ubuntu 14.04 LTS": ubuntuTrusty,
-		"Gentoo/Linux":     gentoo,
-		"":                 noPrettyName,
-	} {
-		if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil {
+	for _, elt := range invalids {
+		if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil {
 			t.Fatalf("failed to write to %s: %v", etcOsRelease, err)
 		}
 		s, err := GetOperatingSystem()
-		if s != expect {
-			if expect == "" {
-				t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err)
-			} else {
-				t.Fatalf("Expected '%s', but got '%s'. Err=%v", expect, s, err)
-			}
+		if err == nil || err.Error() != elt.errorExpected {
+			t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err)
+		}
+	}
+
+	for _, elt := range valids {
+		if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil {
+			t.Fatalf("failed to write to %s: %v", etcOsRelease, err)
+		}
+		s, err := GetOperatingSystem()
+		if err != nil || s != elt.expected {
+			t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err)
 		}
 	}
 }
diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go
index e326a119..453cccfb 100644
--- a/vendor/github.com/docker/docker/pkg/parsers/parsers.go
+++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go
@@ -5,6 +5,7 @@ package parsers
 
 import (
 	"fmt"
+	"net"
 	"net/url"
 	"path"
 	"runtime"
@@ -12,18 +13,20 @@ import (
 	"strings"
 )
 
-// ParseHost parses the specified address and returns an address that will be used as the host.
+// ParseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
 // Depending on the address specified, this will use the defaultTCPAddr or defaultUnixAddr
-// FIXME: Change this not to receive default value as parameter
-func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
+// defaultUnixAddr must be an absolute file path (no `unix://` prefix)
+// defaultTCPAddr must be the full `tcp://host:port` form
+func ParseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
 	addr = strings.TrimSpace(addr)
 	if addr == "" {
-		if runtime.GOOS != "windows" {
-			addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
-		} else {
-			// Note - defaultTCPAddr already includes tcp:// prefix
-			addr = defaultTCPAddr
+		if defaultAddr == defaultTLSHost {
+			return defaultTLSHost, nil
 		}
+		if runtime.GOOS != "windows" {
+			return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
+		}
+		return defaultTCPAddr, nil
 	}
 	addrParts := strings.Split(addr, "://")
 	if len(addrParts) == 1 {
@@ -59,29 +62,54 @@ func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
 
 // ParseTCPAddr parses and validates that the specified address is a valid TCP
 // address. It returns a formatted TCP address, either using the address parsed
-// from addr, or the contents of defaultAddr if addr is a blank string.
-func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
-	addr = strings.TrimPrefix(addr, "tcp://")
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
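+// Editorial examples, not part of the upstream patch, assuming defaultAddr
+// is "tcp://127.0.0.1:2376":
+//
+//	""       -> "tcp://127.0.0.1:2376"
+//	":6666"  -> "tcp://127.0.0.1:6666"
+//	"[::1]:" -> "tcp://[::1]:2376"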
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+	if tryAddr == "" || tryAddr == "tcp://" {
+		return defaultAddr, nil
+	}
+	addr := strings.TrimPrefix(tryAddr, "tcp://")
 	if strings.Contains(addr, "://") || addr == "" {
-		return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr)
+		return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+	}
+
+	defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+	defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+	if err != nil {
+		return "", err
+	}
+	// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+	// not 1.4. See https://github.com/golang/go/issues/12200 and
+	// https://github.com/golang/go/issues/6530.
+	if strings.HasSuffix(addr, "]:") {
+		addr += defaultPort
 	}
 
 	u, err := url.Parse("tcp://" + addr)
 	if err != nil {
 		return "", err
 	}
-	hostParts := strings.Split(u.Host, ":")
-	if len(hostParts) != 2 {
-		return "", fmt.Errorf("Invalid bind address format: %s", addr)
-	}
-	host := hostParts[0]
-	if host == "" {
-		host = defaultAddr
+
+	host, port, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
 	}
-	p, err := strconv.Atoi(hostParts[1])
+	if host == "" {
+		host = defaultHost
+	}
+	if port == "" {
+		port = defaultPort
+	}
+	p, err := strconv.Atoi(port)
 	if err != nil && p == 0 {
-		return "", fmt.Errorf("Invalid bind address format: %s", addr)
+		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+	}
+
+	if net.ParseIP(host).To4() == nil && strings.Contains(host, ":") {
+		// This is an IPv6 address; wrap it in brackets so a port can follow.
+		host = "[" + host + "]"
 	}
 	return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil
 }
@@ -116,7 +144,7 @@ func PartParser(template, data string) (map[string]string, error) {
 		out = make(map[string]string, len(templateParts))
 	)
 	if len(parts) != len(templateParts) {
-		return nil, fmt.Errorf("Invalid format to parse.  %s should match template %s", data, template)
+		return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
 	}
 
 	for i, t := range templateParts {
@@ -185,3 +213,53 @@ func ParseLink(val string) (string, string, error) {
 	}
 	return arr[0], arr[1], nil
 }
+
+// ParseUintList parses and validates the specified string as the value
+// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be
+// one of the formats below. Note that duplicates are actually allowed in the
+// input string. It returns a `map[int]bool` with available elements from `val`
+// set to `true`.
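+// Editorial example, not part of the upstream patch:
+//
+//	ParseUintList("0,3-4") // -> map[int]bool{0: true, 3: true, 4: true}
+//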
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go index 903c66af..47b45281 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -1,52 +1,123 @@ package parsers import ( + "reflect" + "runtime" "strings" "testing" ) -func TestParseHost(t *testing.T) { +func TestParseDockerDaemonHost(t *testing.T) { var ( - defaultHTTPHost = "127.0.0.1" - defaultUnix = "/var/run/docker.sock" + defaultHTTPHost = "tcp://localhost:2375" + defaultHTTPSHost = "tcp://localhost:2376" + defaultUnix = "/var/run/docker.sock" + defaultHOST = "unix:///var/run/docker.sock" ) + if runtime.GOOS == "windows" { + defaultHOST = defaultHTTPHost + } invalids := map[string]string{ - "0.0.0.0": "Invalid bind address format: 0.0.0.0", - "tcp://": "Invalid proto, expected tcp: ", - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", + "tcp": "Invalid bind address format: tcp", + "unix": "Invalid bind address format: unix", + "fd": "Invalid bind address format: fd", } valids := map[string]string{ - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - ":6666": "tcp://127.0.0.1:6666", - ":6666/path": "tcp://127.0.0.1:6666/path", - "tcp://:7777": "tcp://127.0.0.1:7777", - "tcp://:7777/path": "tcp://127.0.0.1:7777/path", - "": "unix:///var/run/docker.sock", + "0.0.0.1:": "tcp://0.0.0.1:2375", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + "[::1]:": "tcp://[::1]:2375", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + ":6666": "tcp://localhost:6666", + ":6666/path": "tcp://localhost:6666/path", + "": defaultHOST, + " ": defaultHOST, + " ": defaultHOST, + "tcp://": defaultHTTPHost, + "tcp://:7777": "tcp://localhost:7777", + 
"tcp://:7777/path": "tcp://localhost:7777/path", + " tcp://:7777/path ": "tcp://localhost:7777/path", "unix:///run/docker.sock": "unix:///run/docker.sock", "unix://": "unix:///var/run/docker.sock", "fd://": "fd://", "fd://something": "fd://something", + "localhost:": "tcp://localhost:2375", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", } for invalidAddr, expectedError := range invalids { - if addr, err := ParseHost(defaultHTTPHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError { + if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", invalidAddr); err == nil || err.Error() != expectedError { t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) } } for validAddr, expectedAddr := range valids { - if addr, err := ParseHost(defaultHTTPHost, defaultUnix, validAddr); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got %v", validAddr, expectedAddr, addr) + if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "[::1]:": "tcp://[::1]:2376", + "[::1]:5555": "tcp://[::1]:5555", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", + "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + "localhost:": "tcp://localhost:2376", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) } } } func TestParseInvalidUnixAddrInvalid(t *testing.T) { - if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + if _, err := ParseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { t.Fatalf("Expected an error, got %v", err) } + if _, err := 
ParseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" {
+		t.Fatalf("Expected an error, got %v", err)
+	}
+	if v, err := ParseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" {
+		t.Fatalf("Expected %v, got %v", "unix:///var/run/docker.sock", v)
+	}
 }
 
 func TestParseRepositoryTag(t *testing.T) {
@@ -79,29 +150,6 @@ func TestParseRepositoryTag(t *testing.T) {
 	}
 }
 
-func TestParsePortMapping(t *testing.T) {
-	if _, err := PartParser("ip:public:private", "192.168.1.1:80"); err == nil {
-		t.Fatalf("Expected an error, got %v", err)
-	}
-	data, err := PartParser("ip:public:private", "192.168.1.1:80:8080")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(data) != 3 {
-		t.FailNow()
-	}
-	if data["ip"] != "192.168.1.1" {
-		t.Fail()
-	}
-	if data["public"] != "80" {
-		t.Fail()
-	}
-	if data["private"] != "8080" {
-		t.Fail()
-	}
-}
-
 func TestParseKeyValueOpt(t *testing.T) {
 	invalids := map[string]string{
 		"": "Unable to parse key/value option: ",
@@ -208,3 +256,40 @@ func TestParseLink(t *testing.T) {
 		t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err)
 	}
 }
+
+func TestParseUintList(t *testing.T) {
+	valids := map[string]map[int]bool{
+		"":             {},
+		"7":            {7: true},
+		"1-6":          {1: true, 2: true, 3: true, 4: true, 5: true, 6: true},
+		"0-7":          {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true},
+		"0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true},
+		"0-0,0,1-4":    {0: true, 1: true, 2: true, 3: true, 4: true},
+		"03,1-3":       {1: true, 2: true, 3: true},
+		"3,2,1":        {1: true, 2: true, 3: true},
+		"0-2,3,1":      {0: true, 1: true, 2: true, 3: true},
+	}
+	for k, v := range valids {
+		out, err := ParseUintList(k)
+		if err != nil {
+			t.Fatalf("Expected not to fail, got %v", err)
+		}
+		if !reflect.DeepEqual(out, v) {
+			t.Fatalf("Expected %v, got %v", v, out)
+		}
+	}
+
+	invalids := []string{
+		"this",
+		"1--",
+		"1-10,,10",
+		"10-1",
+		"-1",
+		"-1,0",
+	}
+	for _, v := range invalids {
+		if out, err := ParseUintList(v); err == nil {
+			t.Fatalf("Expected failure with %s but got %v", v, out)
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go
index 865f5f39..e560aff1 100644
--- a/vendor/github.com/docker/docker/pkg/random/random.go
+++ b/vendor/github.com/docker/docker/pkg/random/random.go
@@ -1,7 +1,10 @@
 package random
 
 import (
+	cryptorand "crypto/rand"
 	"io"
+	"math"
+	"math/big"
 	"math/rand"
 	"sync"
 	"time"
@@ -36,8 +39,15 @@ func (r *lockedSource) Seed(seed int64) {
 
 // NewSource returns math/rand.Source safe for concurrent use and initialized
 // with current unix-nano timestamp
 func NewSource() rand.Source {
+	var seed int64
+	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+		// This should not happen, but worst-case fallback to time-based seed.
+ seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } return &lockedSource{ - src: rand.NewSource(time.Now().UnixNano()), + src: rand.NewSource(seed), } } diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md new file mode 100644 index 00000000..2b237a59 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go new file mode 100644 index 00000000..68bb77cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go new file mode 100644 index 00000000..946de87e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. 
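+// Editorial note, not part of the upstream patch: keys are signal names
+// without the "SIG" prefix, which is what ParseSignal looks up after
+// trimming, e.g.:
+//
+//	sig, _ := ParseSignal("SIGTERM") // syscall.SIGTERM
+//	sig, _ = ParseSignal("TERM")     // same result
+//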
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
new file mode 100644
index 00000000..6b9569bb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"THR":    syscall.SIGTHR,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
new file mode 100644
index 00000000..1ecc3294
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
@@ -0,0 +1,44 @@
+package signal
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CLD": syscall.SIGCLD, + "CONT": syscall.SIGCONT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "POLL": syscall.SIGPOLL, + "PROF": syscall.SIGPROF, + "PWR": syscall.SIGPWR, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STKFLT": syscall.SIGSTKFLT, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "UNUSED": syscall.SIGUNUSED, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go new file mode 100644 index 00000000..d4fea931 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) + +const ( + // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. + SIGCHLD = syscall.SIGCHLD + // SIGWINCH is a signal sent to a process when its controlling terminal changes its size + SIGWINCH = syscall.SIGWINCH + // DefaultStopSignal is the syscall signal used to stop a container in unix systems. + DefaultStopSignal = "SIGTERM" +) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go new file mode 100644 index 00000000..161ba273 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!darwin,!freebsd,!windows + +package signal + +import ( + "syscall" +) + +// SignalMap is an empty map of signals for unsupported platform. +var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go new file mode 100644 index 00000000..c80a951c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. 
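+//
+// Editorial note, not part of the upstream patch: only these two names
+// resolve on Windows, so e.g. ParseSignal("HUP") returns an
+// "Invalid signal" error here.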
+var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go new file mode 100644 index 00000000..2cf5ccf0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/trap.go @@ -0,0 +1,74 @@ +package signal + +import ( + "os" + gosignal "os/signal" + "runtime" + "sync/atomic" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT} + gosignal.Notify(c, signals...) + go func() { + interruptCount := uint32(0) + for sig := range c { + go func(sig os.Signal) { + logrus.Infof("Processing signal '%v'", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks() + logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +// DumpStacks dumps the runtime stack. +func DumpStacks() { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + // Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine + // traces won't show up in the log. + logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index 684b4d4c..9be01841 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -9,19 +9,26 @@ import ( ) const ( - StdWriterPrefixLen = 8 - StdWriterFdIndex = 0 - StdWriterSizeIndex = 4 + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 ) -type StdType [StdWriterPrefixLen]byte +// StdType prefixes type and length to standard stream. +type StdType [stdWriterPrefixLen]byte var ( - Stdin StdType = StdType{0: 0} - Stdout StdType = StdType{0: 1} - Stderr StdType = StdType{0: 2} + // Stdin represents standard input stream type. + Stdin = StdType{0: 0} + // Stdout represents standard output stream type. 
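+	// Editorial note, not part of the upstream patch: on the wire each
+	// frame is an 8-byte header -- one fd byte, three zero bytes, and a
+	// 4-byte big-endian payload length -- followed by the payload;
+	// StdCopy below demultiplexes exactly this framing.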
+ Stdout = StdType{0: 1} + // Stderr represents standard error stream type. + Stderr = StdType{0: 2} ) +// StdWriter is a wrapper of io.Writer with extra customized info. type StdWriter struct { io.Writer prefix StdType @@ -36,10 +43,10 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) { binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) n1, err = w.Writer.Write(w.prefix[:]) if err != nil { - n = n1 - StdWriterPrefixLen + n = n1 - stdWriterPrefixLen } else { n2, err = w.Writer.Write(buf) - n = n1 + n2 - StdWriterPrefixLen + n = n1 + n2 - stdWriterPrefixLen } if n < 0 { n = 0 @@ -61,7 +68,7 @@ func NewStdWriter(w io.Writer, t StdType) *StdWriter { } } -var ErrInvalidStdHeader = errors.New("Unrecognized input header") +var errInvalidStdHeader = errors.New("Unrecognized input header") // StdCopy is a modified version of io.Copy. // @@ -75,7 +82,7 @@ var ErrInvalidStdHeader = errors.New("Unrecognized input header") // `written` will hold the total number of bytes written to `dstout` and `dsterr`. func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { var ( - buf = make([]byte, 32*1024+StdWriterPrefixLen+1) + buf = make([]byte, startingBufLen) bufLen = len(buf) nr, nw int er, ew error @@ -85,12 +92,12 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) for { // Make sure we have at least a full header - for nr < StdWriterPrefixLen { + for nr < stdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) nr += nr2 if er == io.EOF { - if nr < StdWriterPrefixLen { + if nr < stdWriterPrefixLen { logrus.Debugf("Corrupted prefix: %v", buf[:nr]) return written, nil } @@ -103,7 +110,7 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) } // Check the first byte to know where to write - switch buf[StdWriterFdIndex] { + switch buf[stdWriterFdIndex] { case 0: fallthrough case 1: @@ -113,30 +120,30 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) // Write on stderr out = dsterr default: - logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) - return 0, ErrInvalidStdHeader + logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) + return 0, errInvalidStdHeader } // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) logrus.Debugf("framesize: %d", frameSize) // Check if the buffer is big enough to read the frame. // Extend it if necessary. - if frameSize+StdWriterPrefixLen > bufLen { - logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) - buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) + if frameSize+stdWriterPrefixLen > bufLen { + logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf)) + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
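For reference, the header that StdWriter prepends (and StdCopy parses here) is 8 bytes: the stream selector byte at stdWriterFdIndex (0 stdin, 1 stdout, 2 stderr), three zero padding bytes, then a big-endian uint32 payload length at stdWriterSizeIndex, followed by the payload itself. A hand-built frame, as a sketch:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	payload := []byte("hello")
    	// 8-byte header: [fd, 0, 0, 0, len as big-endian uint32], then payload.
    	frame := make([]byte, 8+len(payload))
    	frame[0] = 1 // 1 selects stdout on the reading side
    	binary.BigEndian.PutUint32(frame[4:8], uint32(len(payload)))
    	copy(frame[8:], payload)
    	fmt.Printf("% x\n", frame) // 01 00 00 00 00 00 00 05 68 65 6c 6c 6f
    }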
bufLen = len(buf) } // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+StdWriterPrefixLen { + for nr < frameSize+stdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) nr += nr2 if er == io.EOF { - if nr < frameSize+StdWriterPrefixLen { - logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr]) + if nr < frameSize+stdWriterPrefixLen { + logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr]) return written, nil } break @@ -148,7 +155,7 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) } // Write the retrieved frame (without header) - nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) if ew != nil { logrus.Debugf("Error writing frame: %s", ew) return 0, ew @@ -161,8 +168,8 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) written += int64(nw) // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+StdWriterPrefixLen:]) + copy(buf, buf[frameSize+stdWriterPrefixLen:]) // Move the index - nr -= frameSize + StdWriterPrefixLen + nr -= frameSize + stdWriterPrefixLen } } diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go index a9fd73a4..88d88d41 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -2,6 +2,8 @@ package stdcopy import ( "bytes" + "errors" + "io" "io/ioutil" "strings" "testing" @@ -45,7 +47,143 @@ func TestWrite(t *testing.T) { t.Fatalf("Error while writing with StdWrite") } if n != len(data) { - t.Fatalf("Write should have writen %d byte but wrote %d.", len(data), n) + t.Fatalf("Write should have written %d bytes but wrote %d.", len(data), n) + } +} + +type errWriter struct { + n int + err error +} + +func (f *errWriter) Write(buf []byte) (int, error) { + return f.n, f.err +} + +func TestWriteWithWriterError(t *testing.T) { + expectedError := errors.New("expected") + expectedReturnedBytes := 10 + writer := NewStdWriter(&errWriter{ + n: stdWriterPrefixLen + expectedReturnedBytes, + err: expectedError}, Stdout) + data := []byte("This won't get written, sigh") + n, err := writer.Write(data) + if err != expectedError { + t.Fatalf("Didn't get expected error.") + } + if n != expectedReturnedBytes { + t.Fatalf("Didn't get expected written bytes %d, got %d.", + expectedReturnedBytes, n) + } +} + +func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { + writer := NewStdWriter(&errWriter{n: -1}, Stdout) + data := []byte("This won't get written, sigh") + actual, _ := writer.Write(data) + if actual != 0 { + t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) + } +} + +func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { + buffer = new(bytes.Buffer) + dstOut := NewStdWriter(buffer, Stdout) + _, err = dstOut.Write(stdOutBytes) + if err != nil { + return + } + dstErr := NewStdWriter(buffer, Stderr) + _, err = dstErr.Write(stdErrBytes) + return +} + +func TestStdCopyWriteAndRead(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err != nil {
t.Fatal(err) } + expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) + if written != int64(expectedTotalWritten) { + t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) + } +} + +type customReader struct { + n int + err error + totalCalls int + correctCalls int + src *bytes.Buffer +} + +func (f *customReader) Read(buf []byte) (int, error) { + f.totalCalls++ + if f.totalCalls <= f.correctCalls { + return f.src.Read(buf) + } + return f.n, f.err +} + +func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { + expectedError := errors.New("error") + reader := &customReader{ + err: expectedError} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { + expectedError := errors.New("error") + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: expectedError, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyDetectsCorruptedFrame(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: io.EOF, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != startingBufLen { + t.Fatalf("Expected %d bytes written, got %d", startingBufLen, written) + } + if err != nil { + t.Fatalf("Expected nil error, got %v", err) } } @@ -71,6 +209,44 @@ func TestStdCopyWithCorruptedPrefix(t *testing.T) { } } +func TestStdCopyReturnsWriteErrors(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + expectedError := errors.New("expected") + + dstOut := &errWriter{err: expectedError} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error, got %v", err) + } +} + +func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + dstOut := &errWriter{n: startingBufLen - 10} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have returned 0 written bytes, but returned %d", written) + } + if err != io.ErrShortWrite { + t.Fatalf("Didn't get expected io.ErrShortWrite error") + } +} + func BenchmarkWrite(b *testing.B) { w := NewStdWriter(ioutil.Discard, Stdout) data := []byte("Test line for testing stdwriter performance\n") diff --git
a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go index ab1f9d47..266a74ba 100644 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -34,7 +34,7 @@ func TruncateID(id string) string { func generateID(crypto bool) string { b := make([]byte, 32) - var r io.Reader = random.Reader + r := random.Reader if crypto { r = rand.Reader } diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md new file mode 100644 index 00000000..b3e45457 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go new file mode 100644 index 00000000..41a0d2eb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go @@ -0,0 +1,87 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" + + "github.com/docker/docker/pkg/random" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[random.Rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates a random ASCII string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) + } + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + if len(s) <= maxlen { + return s + } + return s[:maxlen] +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?!
\t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and a open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to an program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go new file mode 100644 index 00000000..fec59450 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go @@ -0,0 +1,105 @@ +package stringutils + +import "testing" + +func testLengthHelper(generator func(int) string, t *testing.T) { + expectedLength := 20 + s := generator(expectedLength) + if len(s) != expectedLength { + t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) + } +} + +func testUniquenessHelper(generator func(int) string, t *testing.T) { + repeats := 25 + set := make(map[string]struct{}, repeats) + for i := 0; i < repeats; i = i + 1 { + str := generator(64) + if len(str) != 64 { + t.Fatalf("Id returned is incorrect: %s", str) + } + if _, ok := set[str]; ok { + t.Fatalf("Random number is repeated") + } + set[str] = struct{}{} + } +} + +func isASCII(s string) bool { + for _, c := range s { + if c > 127 { + return false + } + } + return true +} + +func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAsciiStringLength(t *testing.T) { + testLengthHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { + str := GenerateRandomASCIIString(64) + if !isASCII(str) { + t.Fatalf("%s contained non-ascii characters", str) + } +} + +func TestTruncate(t *testing.T) { + str := "teststring" + newstr := Truncate(str, 4) + if newstr != "test" { + t.Fatalf("Expected test, got %s", newstr) + } + newstr = Truncate(str, 20) + if newstr != "teststring" { + t.Fatalf("Expected teststring, got %s", newstr) + } +} + +func TestInSlice(t *testing.T) { + slice := []string{"test", "in", "slice"} + + test := InSlice(slice, "test") + if !test { + t.Fatalf("Expected string test to be in slice") + } + test = InSlice(slice, "SLICE") + if !test { + t.Fatalf("Expected string SLICE to be in slice") + } + test = InSlice(slice, "notinslice") + if test { + t.Fatalf("Expected string notinslice not to be in slice") + } +} + +func TestShellQuoteArgumentsEmpty(t *testing.T) { + actual := ShellQuoteArguments([]string{}) + expected := "" + if actual != expected { + t.Fatalf("Expected an empty string") + } +} + +func TestShellQuoteArguments(t *testing.T) { + simpleString := "simpleString" + complexString := "This is a 'more' complex $tring with some special char *" + actual := ShellQuoteArguments([]string{simpleString, complexString}) + expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char 
*'" + if actual != expected { + t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) + } +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/strslice.go b/vendor/github.com/docker/docker/pkg/stringutils/strslice.go new file mode 100644 index 00000000..40557540 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/strslice.go @@ -0,0 +1,71 @@ +package stringutils + +import ( + "encoding/json" + "strings" +) + +// StrSlice representes a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice struct { + parts []string +} + +// MarshalJSON Marshals (or serializes) the StrSlice into the json format. +// This method is needed to implement json.Marshaller. +func (e *StrSlice) MarshalJSON() ([]byte, error) { + if e == nil { + return []byte{}, nil + } + return json.Marshal(e.Slice()) +} + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of strings. +// This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + e.parts = p + return nil +} + +// Len returns the number of parts of the StrSlice. +func (e *StrSlice) Len() int { + if e == nil { + return 0 + } + return len(e.parts) +} + +// Slice gets the parts of the StrSlice as a Slice of string. +func (e *StrSlice) Slice() []string { + if e == nil { + return nil + } + return e.parts +} + +// ToString gets space separated string of all the parts. +func (e *StrSlice) ToString() string { + s := e.Slice() + if s == nil { + return "" + } + return strings.Join(s, " ") +} + +// NewStrSlice creates an StrSlice based on the specified parts (as strings). 
+func NewStrSlice(parts ...string) *StrSlice { + return &StrSlice{parts} +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/strslice_test.go b/vendor/github.com/docker/docker/pkg/stringutils/strslice_test.go new file mode 100644 index 00000000..a587784e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/strslice_test.go @@ -0,0 +1,135 @@ +package stringutils + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestStrSliceMarshalJSON(t *testing.T) { + strss := map[*StrSlice]string{ + nil: "", + &StrSlice{}: "null", + &StrSlice{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`, + } + + for strs, expected := range strss { + data, err := strs.MarshalJSON() + if err != nil { + t.Fatal(err) + } + if string(data) != expected { + t.Fatalf("Expected %v, got %v", expected, string(data)) + } + } +} + +func TestStrSliceUnmarshalJSON(t *testing.T) { + parts := map[string][]string{ + "": {"default", "values"}, + "[]": {}, + `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, + } + for json, expectedParts := range parts { + strs := &StrSlice{ + []string{"default", "values"}, + } + if err := strs.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := strs.Slice() + if len(actualParts) != len(expectedParts) { + t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) + } + for index, part := range actualParts { + if part != expectedParts[index] { + t.Fatalf("Expected %v, got %v", expectedParts, actualParts) + break + } + } + } +} + +func TestStrSliceUnmarshalString(t *testing.T) { + var e *StrSlice + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + slice := e.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", slice[0]) + } +} + +func TestStrSliceUnmarshalSlice(t *testing.T) { + var e *StrSlice + echo, err := json.Marshal([]string{"echo"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + slice := e.Slice() + if len(slice) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", slice) + } + + if slice[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", slice[0]) + } +} + +func TestStrSliceToString(t *testing.T) { + slices := map[*StrSlice]string{ + NewStrSlice(""): "", + NewStrSlice("one"): "one", + NewStrSlice("one", "two"): "one two", + } + for s, expected := range slices { + toString := s.ToString() + if toString != expected { + t.Fatalf("Expected %v, got %v", expected, toString) + } + } +} + +func TestStrSliceLen(t *testing.T) { + var emptyStrSlice *StrSlice + slices := map[*StrSlice]int{ + NewStrSlice(""): 1, + NewStrSlice("one"): 1, + NewStrSlice("one", "two"): 2, + emptyStrSlice: 0, + } + for s, expected := range slices { + if s.Len() != expected { + t.Fatalf("Expected %d, got %d", s.Len(), expected) + } + } +} + +func TestStrSliceSlice(t *testing.T) { + var emptyStrSlice *StrSlice + slices := map[*StrSlice][]string{ + NewStrSlice("one"): {"one"}, + NewStrSlice("one", "two"): {"one", "two"}, + emptyStrSlice: nil, + } + for s, expected := range slices { + if !reflect.DeepEqual(s.Slice(), expected) { + t.Fatalf("Expected %v, got %v", s.Slice(), expected) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md index 0d1dbb70..8dba54fd 
100644 --- a/vendor/github.com/docker/docker/pkg/symlink/README.md +++ b/vendor/github.com/docker/docker/pkg/symlink/README.md @@ -1,4 +1,5 @@ -Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, +as well as a Windows long-path aware version of filepath.EvalSymlinks from the [Go standard library](https://golang.org/pkg/path/filepath). The code from filepath.EvalSymlinks has been adapted in fs.go. diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go index b4bdff24..dcf707f4 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/fs.go +++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go @@ -12,15 +12,18 @@ import ( "os" "path/filepath" "strings" + + "github.com/docker/docker/pkg/system" ) -// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an +// absolute path. This function handles paths in a platform-agnostic manner. func FollowSymlinkInScope(path, root string) (string, error) { - path, err := filepath.Abs(path) + path, err := filepath.Abs(filepath.FromSlash(path)) if err != nil { return "", err } - root, err = filepath.Abs(root) + root, err = filepath.Abs(filepath.FromSlash(root)) if err != nil { return "", err } @@ -119,7 +122,7 @@ func evalSymlinksInScope(path, root string) (string, error) { if err != nil { return "", err } - if filepath.IsAbs(dest) { + if system.IsAbs(dest) { b.Reset() } path = dest + string(filepath.Separator) + path @@ -129,3 +132,12 @@ func evalSymlinksInScope(path, root string) (string, error) { // what's happening here return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil } + +// EvalSymlinks returns the path name after the evaluation of any symbolic +// links. +// If path is relative the result will be relative to the current directory, +// unless one of the components is an absolute symbolic link. +// This version has been updated to support long paths prepended with `\\?\`. 
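As context for these symlink helpers: FollowSymlinkInScope resolves links while keeping the result anchored under a scope root, which is what makes it safe to use against container filesystems whose links may point outside. A sketch (paths are made up):

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/symlink"
    )

    func main() {
    	// Even if /rootfs/etc/passwd is a symlink to ../../etc/passwd on
    	// disk, the escape is re-anchored under /rootfs rather than
    	// leaking out to the host's /etc.
    	p, err := symlink.FollowSymlinkInScope("/rootfs/etc/passwd", "/rootfs")
    	fmt.Println(p, err) // e.g. /rootfs/etc/passwd <nil>
    }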
+func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go new file mode 100644 index 00000000..818004f2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package symlink + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go new file mode 100644 index 00000000..29bd4568 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go @@ -0,0 +1,156 @@ +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/longpath" +) + +func toShort(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return syscall.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // syscall.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. + if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment.
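	// (Aside: longpath.Prefix is the literal `\\?\`. Paths carrying this
	// prefix bypass Windows' MAX_PATH (260 character) normalization, e.g.
	// `\\?\C:\some\very\deep\tree`, so walkSymlinks treats the prefix as
	// an opaque root marker and only walks the segments after it.)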
+ if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 00000000..31ed9ff1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,31 @@ +package system + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + // The max Unix time is 33 bits set + unixMaxTime := unixMinTime.Add((1<<33 - 1) * time.Second) + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go new file mode 100644 index 00000000..f65a4b80 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go @@ -0,0 +1,120 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" +) + +// prepareTempFile creates a temporary file in a temporary directory. +func prepareTempFile(t *testing.T) (string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + return file, dir +} + +// TestChtimes tests Chtimes on a tempfile. 
Test only mTime, because aTime is OS dependent +func TestChtimes(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + // The max Unix time is 33 bits set + unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) + afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixMaxTime { + t.Fatalf("Expected: %s, got: %s", unixMaxTime, f.ModTime()) + } + + // Test aTime after Unix max time and mTime set to Unix max time + Chtimes(file, afterUnixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixMaxTime { + t.Fatalf("Expected: %s, got: %s", unixMaxTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixMaxTime, afterUnixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go new file mode 100644 index 00000000..6998bbef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go @@ -0,0 +1,121 @@ +// +build linux freebsd + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimes tests Chtimes access time on a tempfile on Linux +func TestChtimesLinux(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + // The max Unix time is 33 bits set + unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) + afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat := f.Sys().(*syscall.Stat_t) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != 
unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixMaxTime { + t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) + } + + // Test aTime after Unix max time and mTime set to Unix max time + Chtimes(file, afterUnixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixMaxTime, afterUnixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixMaxTime { + t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go new file mode 100644 index 00000000..f09c4028 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go @@ -0,0 +1,114 @@ +// +build windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimes tests Chtimes access time on a tempfile on Windows +func TestChtimesWindows(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + // The max Unix time is 33 bits set + unixMaxTime := unixEpochTime.Add((1<<33 - 1) * time.Second) + afterUnixMaxTime := unixMaxTime.Add(100 * time.Second) + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before 
Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixMaxTime { + t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) + } + + // Test aTime after Unix max time and mTime set to Unix max time + Chtimes(file, afterUnixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixMaxTime, afterUnixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixMaxTime { + t.Fatalf("Expected: %s, got: %s", unixMaxTime, aTime) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go index 63045186..28831898 100644 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -5,5 +5,6 @@ import ( ) var ( + // ErrNotSupportedPlatform means the platform is not supported. ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") ) diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go index 23f7c618..04e2de78 100644 --- a/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go @@ -8,11 +8,6 @@ import ( "unsafe" ) -const ( - EVENT_ALL_ACCESS = 0x1F0003 - EVENT_MODIFY_STATUS = 0x0002 -) - var ( procCreateEvent = modkernel32.NewProc("CreateEventW") procOpenEvent = modkernel32.NewProc("OpenEventW") @@ -21,13 +16,14 @@ var ( procPulseEvent = modkernel32.NewProc("PulseEvent") ) +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. 
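The event wrappers defined in this file are thin syscall shims; once vendored, creating and signaling a named Win32 event looks roughly like the following sketch (the event name is made up):

    // +build windows

    package main

    import (
    	"syscall"

    	"github.com/docker/docker/pkg/system"
    )

    func main() {
    	// Manual-reset event, initially unsignaled.
    	h, err := system.CreateEvent(nil, true, false, "Global\\demo-event")
    	if err != nil {
    		panic(err)
    	}
    	if err := system.SetEvent(h); err != nil { // wake any waiters
    		panic(err)
    	}
    	syscall.CloseHandle(h)
    }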
func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 = 0 + var _p1 uint32 if manualReset { _p1 = 1 } - var _p2 uint32 = 0 + var _p2 uint32 if initialState { _p2 = 1 } @@ -40,9 +36,10 @@ func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, return } +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 = 0 + var _p1 uint32 if inheritHandle { _p1 = 1 } @@ -55,14 +52,17 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle sy return } +// SetEvent implements win32 SetEvent func in golang. func SetEvent(handle syscall.Handle) (err error) { return setResetPulse(handle, procSetEvent) } +// ResetEvent implements win32 ResetEvent func in golang. func ResetEvent(handle syscall.Handle) (err error) { return setResetPulse(handle, procResetEvent) } +// PulseEvent implements win32 PulseEvent func in golang. func PulseEvent(handle syscall.Handle) (err error) { return setResetPulse(handle, procPulseEvent) } diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go index e1f70e8d..c14feb84 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -4,8 +4,16 @@ package system import ( "os" + "path/filepath" ) +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all directories created. func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 90b50060..16823d55 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -4,7 +4,9 @@ package system import ( "os" + "path/filepath" "regexp" + "strings" "syscall" ) @@ -62,3 +64,19 @@ func MkdirAll(path string, perm os.FileMode) error { } return nil } + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go index d0e43b37..bd23c4d5 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat.go @@ -7,10 +7,10 @@ import ( ) // Lstat takes a path to a file and returns -// a system.Stat_t type pertaining to that file.
+// a system.StatT type pertaining to that file. // // Throws an error if the file does not exist -func Lstat(path string) (*Stat_t, error) { +func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_test.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go similarity index 95% rename from vendor/github.com/docker/docker/pkg/system/lstat_test.go rename to vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go index 6bac492e..062cf53b 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_test.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go @@ -1,3 +1,5 @@ +// +build linux freebsd + package system import ( diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go index eee1be26..49e87eb4 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -6,21 +6,17 @@ import ( "os" ) -// Some explanation for my own sanity, and hopefully maintainers in the -// future. -// // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. // Note the Linux version uses fromStatT to do the copy back, // but that is not strictly necessary when already in an OS specific module. - -func Lstat(path string) (*Stat_t, error) { +func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { return nil, err } - return &Stat_t{ + return &StatT{ name: fi.Name(), size: fi.Size(), mode: fi.Mode(), diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index e2ca1400..a07bb17c 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -2,7 +2,6 @@ package system import ( "bufio" - "errors" "io" "os" "strconv" @@ -11,10 +10,6 @@ import ( "github.com/docker/docker/pkg/units" ) -var ( - ErrMalformed = errors.New("malformed file") -) - // ReadMemInfo retrieves memory statistics of the host system and returns a // MemInfo type. func ReadMemInfo() (*MemInfo, error) { diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go similarity index 97% rename from vendor/github.com/docker/docker/pkg/system/meminfo_linux_test.go rename to vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go index 10ddf796..c8fec629 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go @@ -1,3 +1,5 @@ +// +build linux freebsd + package system import ( diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go index 604d3387..82ddd30c 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -2,6 +2,7 @@ package system +// ReadMemInfo is not supported on platforms other than linux and windows.
func ReadMemInfo() (*MemInfo, error) { return nil, ErrNotSupportedPlatform } diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index 26617eb0..73958182 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -7,14 +7,16 @@ import ( ) // Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev +// with attributes specified by mode and dev. func Mknod(path string, mode uint32, dev int) error { return syscall.Mknod(path, mode, dev) } +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor +// then the top 12 bits of the minor. func Mkdev(major int64, minor int64) uint32 { return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go index 1811542a..2e863c02 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -2,10 +2,12 @@ package system +// Mknod is not implemented on Windows. func Mknod(path string, mode uint32, dev int) error { return ErrNotSupportedPlatform } +// Mkdev is not implemented on Windows. func Mkdev(major int64, minor int64) uint32 { panic("Mkdev not implemented on Windows.") } diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go index e2ecfe52..087034c5 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat.go +++ b/vendor/github.com/docker/docker/pkg/system/stat.go @@ -6,9 +6,9 @@ import ( "syscall" ) -// Stat_t type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file -type Stat_t struct { +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { mode uint32 uid uint32 gid uint32 @@ -17,30 +17,37 @@ type Stat_t struct { mtim syscall.Timespec } -func (s Stat_t) Mode() uint32 { +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { return s.mode } -func (s Stat_t) Uid() uint32 { +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { return s.uid } -func (s Stat_t) Gid() uint32 { +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { return s.gid } -func (s Stat_t) Rdev() uint64 { +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { return s.rdev } -func (s Stat_t) Size() int64 { +// Size returns file's size. +func (s StatT) Size() int64 { return s.size } -func (s Stat_t) Mtim() syscall.Timespec { +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { return s.mtim } -func (s Stat_t) GetLastModification() syscall.Timespec { +// GetLastModification returns file's last modification time. 
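Stepping back to Mkdev above: because of the split encoding, major and minor numbers do not occupy contiguous bits, so a worked example helps. /dev/null is conventionally major 1, minor 3:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/system"
    )

    func main() {
    	// Low 8 bits of the minor sit at bits 0-7, the major at bits 8-19,
    	// and the remaining high bits of the minor start at bit 20.
    	dev := system.Mkdev(1, 3)
    	fmt.Printf("%#x\n", dev) // 0x103
    }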
+func (s StatT) GetLastModification() syscall.Timespec { return s.Mtim() } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go index 4b2198b3..d0fb6f15 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -5,8 +5,8 @@ import ( ) // fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { - return &Stat_t{size: s.Size, +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, @@ -18,7 +18,7 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { // a system.Stat_t type pertaining to that file. // // Throws an error if the file does not exist -func Stat(path string) (*Stat_t, error) { +func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 80262d95..8b1eded1 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -5,8 +5,8 @@ import ( ) // fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { - return &Stat_t{size: s.Size, +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, @@ -14,17 +14,17 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { mtim: s.Mtim}, nil } -// FromStatT exists only on linux, and loads a system.Stat_t from a +// FromStatT exists only on linux, and loads a system.StatT from a // syscall.Stat_t. -func FromStatT(s *syscall.Stat_t) (*Stat_t, error) { +func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) } // Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. +// a system.StatT type pertaining to that file.
// // Throws an error if the file does not exist -func Stat(path string) (*Stat_t, error) { +func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/pkg/system/stat_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go similarity index 88% rename from vendor/github.com/docker/docker/pkg/system/stat_test.go rename to vendor/github.com/docker/docker/pkg/system/stat_unix_test.go index 45341292..dee8d30a 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_test.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go @@ -1,3 +1,5 @@ +// +build linux freebsd + package system import ( @@ -22,10 +24,10 @@ func TestFromStatT(t *testing.T) { if stat.Mode != s.Mode() { t.Fatal("got invalid mode") } - if stat.Uid != s.Uid() { + if stat.Uid != s.UID() { t.Fatal("got invalid uid") } - if stat.Gid != s.Gid() { + if stat.Gid != s.GID() { t.Fatal("got invalid gid") } if stat.Rdev != s.Rdev() { diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go index 5251ae21..381ea821 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -6,9 +6,9 @@ import ( "syscall" ) -// fromStatT creates a system.Stat_t type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { - return &Stat_t{size: s.Size, +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index b1fd39e8..39490c62 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -7,7 +7,9 @@ import ( "time" ) -type Stat_t struct { +// StatT type contains status of a file. It contains metadata +// like name, permission, size, etc about a file. +type StatT struct { name string size int64 mode os.FileMode @@ -15,22 +17,27 @@ type Stat_t struct { isDir bool } -func (s Stat_t) Name() string { +// Name returns file's name. +func (s StatT) Name() string { return s.name } -func (s Stat_t) Size() int64 { +// Size returns file's size. +func (s StatT) Size() int64 { return s.size } -func (s Stat_t) Mode() os.FileMode { +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { return s.mode } -func (s Stat_t) ModTime() time.Time { +// ModTime returns file's last modification time. +func (s StatT) ModTime() time.Time { return s.modTime } -func (s Stat_t) IsDir() bool { +// IsDir returns whether file is actually a directory. +func (s StatT) IsDir() bool { return s.isDir } diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 00000000..f1497c58 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd + +package system + +import "syscall" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. 
+func Unmount(dest string) error { + return syscall.Unmount(dest, 0) +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 00000000..273aa234 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,36 @@ +package system + +import ( + "fmt" + "syscall" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() (OSVersion, error) { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + return osv, fmt.Errorf("Failed to call GetVersion()") + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv, nil +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go index fddbecd3..c670fcd7 100644 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -6,6 +6,8 @@ import ( "syscall" ) +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. func Umask(newmask int) (oldmask int, err error) { return syscall.Umask(newmask), nil } diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go index 3be563f8..13f1de17 100644 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -2,6 +2,7 @@ package system +// Umask is not supported on the windows platform. func Umask(newmask int) (oldmask int, err error) { // should not be called on cli code path return 0, ErrNotSupportedPlatform } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go index 4c6002fe..0a161975 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go @@ -2,10 +2,7 @@ package system import "syscall" +// LUtimesNano is not supported by darwin platform. func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go index ceaa044c..e2eac3b5 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -5,6 +5,8 @@ import ( "unsafe" ) +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbolic links because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
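The point of the L-variant, as the comment above says, is that it operates on the link itself rather than on its target. A usage sketch (the path is made up):

    package main

    import (
    	"syscall"
    	"time"

    	"github.com/docker/docker/pkg/system"
    )

    func main() {
    	now := time.Now()
    	ts := []syscall.Timespec{
    		syscall.NsecToTimespec(now.UnixNano()), // atime
    		syscall.NsecToTimespec(now.UnixNano()), // mtime
    	}
    	// Updates /tmp/link itself, not the file the link points to.
    	if err := system.LUtimesNano("/tmp/link", ts); err != nil {
    		panic(err)
    	}
    }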
func LUtimesNano(path string, ts []syscall.Timespec) error { var _path *byte _path, err := syscall.BytePtrFromString(path) @@ -18,7 +20,3 @@ func LUtimesNano(path string, ts []syscall.Timespec) error { return nil } - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go index 8f902982..007bfa8c 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -5,10 +5,12 @@ import ( "unsafe" ) +// LUtimesNano is used to change the access and modification times of the specified path. +// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment. func LUtimesNano(path string, ts []syscall.Timespec) error { // These are not currently available in syscall - AT_FDCWD := -100 - AT_SYMLINK_NOFOLLOW := 0x100 + atFdCwd := -100 + atSymLinkNoFollow := 0x100 var _path *byte _path, err := syscall.BytePtrFromString(path) @@ -16,13 +18,9 @@ func LUtimesNano(path string, ts []syscall.Timespec) error { return err } - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { return err } return nil } - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_test.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go similarity index 98% rename from vendor/github.com/docker/docker/pkg/system/utimes_test.go rename to vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go index 350cce1e..1ee0d099 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_test.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go @@ -1,3 +1,5 @@ +// +build linux freebsd + package system import ( diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go index adf2734f..50c3a043 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -4,10 +4,7 @@ package system import "syscall" +// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } - -func UtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index 00edb201..d2e2c057 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -5,7 +5,9 @@ import ( "unsafe" ) -// Returns a nil slice and nil error if the xattr is not set +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It returns a nil slice and nil error if the xattr is not set.
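Since the Linux implementation above goes through utimensat(2) with AT_SYMLINK_NOFOLLOW, it updates the timestamps of a symlink itself rather than of its target. A Linux-only usage sketch (paths invented for illustration):

    package main

    import (
        "log"
        "os"
        "syscall"
        "time"

        "github.com/docker/docker/pkg/system"
    )

    func main() {
        if err := os.Symlink("/etc/hostname", "/tmp/hostlink"); err != nil { // illustrative paths
            log.Fatal(err)
        }
        ts := syscall.NsecToTimespec(time.Now().UnixNano())
        // utimensat semantics: ts[0] is the access time, ts[1] the modification time.
        if err := system.LUtimesNano("/tmp/hostlink", []syscall.Timespec{ts, ts}); err != nil {
            log.Fatal(err)
        }
    }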
func Lgetxattr(path string, attr string) ([]byte, error) { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { @@ -36,6 +38,8 @@ func Lgetxattr(path string, attr string) ([]byte, error) { var _zero uintptr +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. func Lsetxattr(path string, attr string, data []byte, flags int) error { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go index 0060c167..0114f222 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -2,10 +2,12 @@ package system +// Lgetxattr is not supported on platforms other than linux. func Lgetxattr(path string, attr string) ([]byte, error) { return nil, ErrNotSupportedPlatform } +// Lsetxattr is not supported on platforms other than linux. func Lsetxattr(path string, attr string, data []byte, flags int) error { return ErrNotSupportedPlatform } diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go index d2df58c7..0f5783be 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -146,7 +146,7 @@ var ( } ) -// TarSum default is "sha256" +// DefaultTHash is the default TarSum hashing algorithm - "sha256". var DefaultTHash = NewTHash("sha256", sha256.New) type simpleTHash struct { diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md index 51e95373..77927ee7 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -10,6 +10,11 @@ This document describes the algorithms used in performing the TarSum checksum calculation on filesystem layers, the need for this method over existing methods, and the versioning of this calculation. +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure.
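In the spirit of that warning, TarSum is meant for best-effort layer comparison, not security. A rough usage sketch following the package's usual pattern (the archive name is illustrative, and the second argument is assumed to disable compression of the relayed stream):

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "log"
        "os"

        "github.com/docker/docker/pkg/tarsum"
    )

    func main() {
        f, err := os.Open("layer.tar") // illustrative archive
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // NewTarSum hashes with DefaultTHash (sha256) for the chosen version.
        ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
        if err != nil {
            log.Fatal(err)
        }
        // The checksum is only complete once the whole stream has been read.
        if _, err := io.Copy(ioutil.Discard, ts); err != nil {
            log.Fatal(err)
        }
        fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:..."
    }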
## Introduction diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go index 8988b9f5..28822868 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go @@ -95,17 +95,17 @@ func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]s func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { return [][2]string{ {"name", h.Name}, - {"mode", strconv.Itoa(int(h.Mode))}, + {"mode", strconv.FormatInt(h.Mode, 10)}, {"uid", strconv.Itoa(h.Uid)}, {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.Itoa(int(h.Size))}, - {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, {"typeflag", string([]byte{h.Typeflag})}, {"linkname", h.Linkname}, {"uname", h.Uname}, {"gname", h.Gname}, - {"devmajor", strconv.Itoa(int(h.Devmajor))}, - {"devminor", strconv.Itoa(int(h.Devminor))}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, } } diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go index da9295ee..04870d1b 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -7,9 +7,11 @@ import ( "io" "os" "os/signal" + "syscall" "github.com/Azure/go-ansiterm/winterm" "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term/windows" ) @@ -36,10 +38,66 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // MSYS (mingw) does not emulate ANSI well. return windows.ConsoleStreams() default: + if useNativeConsole() { + return os.Stdin, os.Stdout, os.Stderr + } return windows.ConsoleStreams() } } +// useNativeConsole determines if the docker client should use the built-in +// console which supports ANSI emulation, or fall back to the golang emulator +// (github.com/azure/go-ansiterm). +func useNativeConsole() bool { + osv, err := system.GetOSVersion() + if err != nil { + return false + } + + // Native console is not available before major version 10 + if osv.MajorVersion < 10 { + return false + } + + // Must have a late pre-release TP4 build of Windows Server 2016/Windows 10 TH2 or later + if osv.Build < 10578 { + return false + } + + // Environment variable override + if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { + if e == "1" { + return true + } + return false + } + + // Get the handle to stdout + stdOutHandle, err := syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE) + if err != nil { + return false + } + + // Get the console mode from the console's stdout handle + var mode uint32 + if err := syscall.GetConsoleMode(stdOutHandle, &mode); err != nil { + return false + } + + // Legacy mode does not have native ANSI emulation. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + const enableVirtualTerminalProcessing = 0x0004 + if mode&enableVirtualTerminalProcessing == 0 { + return false + } + + // TODO Windows (Post TP4). The native emulator still has issues which + // mean it shouldn't be enabled for everyone. Change this next line to true + // to change the default to "enable if available". In the meantime, users + // can still try it out by using the USE_NATIVE_CONSOLE env variable.
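Stepping back to the versioning.go hunk above: the strconv change is not cosmetic. strconv.Itoa(int(x)) truncates an int64 on 32-bit platforms, where int is 32 bits wide, while strconv.FormatInt always keeps the full value. A small demonstration (the size value is invented; int32 stands in for a 32-bit int):

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        var size int64 = 5 << 32 // larger than a 32-bit int can hold

        // Safe on every platform: operates on the int64 directly.
        fmt.Println(strconv.FormatInt(size, 10)) // "21474836480"

        // What the old Itoa-based code would do on a 32-bit build:
        // the value wraps around before the conversion to string.
        fmt.Println(strconv.Itoa(int(int32(size)))) // "0"
    }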
+ return false +} + // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. func GetFdInfo(in interface{}) (uintptr, bool) { return windows.GetHandleInfo(in) diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go index 53becb01..bb47e120 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -10,8 +10,8 @@ import ( "strings" "unsafe" - . "github.com/Azure/go-ansiterm" - . "github.com/Azure/go-ansiterm/winterm" + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" ) // ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. @@ -26,12 +26,12 @@ type ansiReader struct { } func newAnsiReader(nFile int) *ansiReader { - file, fd := GetStdFile(nFile) + file, fd := winterm.GetStdFile(nFile) return &ansiReader{ file: file, fd: fd, - command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(KEY_ESC_CSI), + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), buffer: make([]byte, 0), } } @@ -86,7 +86,7 @@ func (ar *ansiReader) Read(p []byte) (int, error) { ar.buffer = keyBytes[len(p):] keyBytes = keyBytes[:len(p)] } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translater") + logger.Debug("No key bytes returned from the translator") return 0, nil } @@ -101,28 +101,28 @@ func (ar *ansiReader) Read(p []byte) (int, error) { } // readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]INPUT_RECORD, error) { +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { // Determine the maximum number of records to retrieve // -- Cast around the type system to obtain the size of a single INPUT_RECORD. // unsafe.Sizeof requires an expression vs. a type-reference; the casting // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) countRecords := maxBytes / recordSize - if countRecords > MAX_INPUT_EVENTS { - countRecords = MAX_INPUT_EVENTS + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS } logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) // Wait for and read input events - events := make([]INPUT_RECORD, countRecords) + events := make([]winterm.INPUT_RECORD, countRecords) nEvents := uint32(0) - eventsExist, err := WaitForSingleObject(fd, WAIT_INFINITE) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) if err != nil { return nil, err } if eventsExist { - err = ReadConsoleInput(fd, events, &nEvents) + err = winterm.ReadConsoleInput(fd, events, &nEvents) if err != nil { return nil, err } @@ -135,43 +135,43 @@ func readInputEvents(fd uintptr, maxBytes int) ([]INPUT_RECORD, error) { // KeyEvent Translation Helpers -var arrowKeyMapPrefix = map[WORD]string{ - VK_UP: "%s%sA", - VK_DOWN: "%s%sB", - VK_RIGHT: "%s%sC", - VK_LEFT: "%s%sD", +var arrowKeyMapPrefix = map[winterm.WORD]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", } -var keyMapPrefix = map[WORD]string{ - VK_UP: "\x1B[%sA", - VK_DOWN: "\x1B[%sB", - VK_RIGHT: "\x1B[%sC", - VK_LEFT: "\x1B[%sD", - VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - VK_END: "\x1B[4%s~", // showkey shows ^[[4 - VK_INSERT: "\x1B[2%s~", - VK_DELETE: "\x1B[3%s~", - VK_PRIOR: "\x1B[5%s~", - VK_NEXT: "\x1B[6%s~", - VK_F1: "", - VK_F2: "", - VK_F3: "\x1B[13%s~", - VK_F4: "\x1B[14%s~", - VK_F5: "\x1B[15%s~", - VK_F6: "\x1B[17%s~", - VK_F7: "\x1B[18%s~", - VK_F8: "\x1B[19%s~", - VK_F9: "\x1B[20%s~", - VK_F10: "\x1B[21%s~", - VK_F11: "\x1B[23%s~", - VK_F12: "\x1B[24%s~", +var keyMapPrefix = map[winterm.WORD]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", } // translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []INPUT_RECORD, escapeSequence []byte) []byte { +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { var buffer bytes.Buffer for _, event := range events { - if event.EventType == KEY_EVENT && event.KeyEvent.KeyDown != 0 { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) } } @@ -180,7 +180,7 @@ func translateKeyEvents(events []INPUT_RECORD, escapeSequence []byte) []byte { } // keyToString maps the given input event record to the corresponding string. 
-func keyToString(keyEvent *KEY_EVENT_RECORD, escapeSequence []byte) string { +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { if keyEvent.UnicodeChar == 0 { return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) } @@ -199,16 +199,16 @@ func keyToString(keyEvent *KEY_EVENT_RECORD, escapeSequence []byte) string { // +Key generates ESC N Key if !control && alt { - return KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) } return string(keyEvent.UnicodeChar) } // formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. -func formatVirtualKey(key WORD, controlState DWORD, escapeSequence []byte) string { +func formatVirtualKey(key winterm.WORD, controlState winterm.DWORD, escapeSequence []byte) string { shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control, false) + modifier := getControlKeysModifier(shift, alt, control) if format, ok := arrowKeyMapPrefix[key]; ok { return fmt.Sprintf(format, escapeSequence, modifier) @@ -222,35 +222,35 @@ func formatVirtualKey(key WORD, controlState DWORD, escapeSequence []byte) strin } // getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState DWORD) (shift, alt, control bool) { - shift = 0 != (controlState & SHIFT_PRESSED) - alt = 0 != (controlState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (LEFT_CTRL_PRESSED | RIGHT_CTRL_PRESSED)) +func getControlKeys(controlState winterm.DWORD) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) return shift, alt, control } // getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control, meta bool) string { +func getControlKeysModifier(shift, alt, control bool) string { if shift && alt && control { - return KEY_CONTROL_PARAM_8 + return ansiterm.KEY_CONTROL_PARAM_8 } if alt && control { - return KEY_CONTROL_PARAM_7 + return ansiterm.KEY_CONTROL_PARAM_7 } if shift && control { - return KEY_CONTROL_PARAM_6 + return ansiterm.KEY_CONTROL_PARAM_6 } if control { - return KEY_CONTROL_PARAM_5 + return ansiterm.KEY_CONTROL_PARAM_5 } if shift && alt { - return KEY_CONTROL_PARAM_4 + return ansiterm.KEY_CONTROL_PARAM_4 } if alt { - return KEY_CONTROL_PARAM_3 + return ansiterm.KEY_CONTROL_PARAM_3 } if shift { - return KEY_CONTROL_PARAM_2 + return ansiterm.KEY_CONTROL_PARAM_2 } return "" } diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go index a22d47fe..9f3232c0 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -6,8 +6,8 @@ import ( "io/ioutil" "os" - . "github.com/Azure/go-ansiterm" - . 
"github.com/Azure/go-ansiterm/winterm" + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" "github.com/Sirupsen/logrus" ) @@ -17,17 +17,17 @@ var logger *logrus.Logger type ansiWriter struct { file *os.File fd uintptr - infoReset *CONSOLE_SCREEN_BUFFER_INFO + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO command []byte escapeSequence []byte inAnsiSequence bool - parser *AnsiParser + parser *ansiterm.AnsiParser } func newAnsiWriter(nFile int) *ansiWriter { logFile := ioutil.Discard - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { logFile, _ = os.Create("ansiReaderWriter.log") } @@ -37,21 +37,21 @@ func newAnsiWriter(nFile int) *ansiWriter { Level: logrus.DebugLevel, } - file, fd := GetStdFile(nFile) - info, err := GetConsoleScreenBufferInfo(fd) + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) if err != nil { return nil } - parser := CreateParser("Ground", CreateWinEventHandler(fd, file)) + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) logger.Infof("newAnsiWriter: parser %p", parser) aw := &ansiWriter{ file: file, fd: fd, infoReset: info, - command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(KEY_ESC_CSI), + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), parser: parser, } diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go index ecd1c592..3711d988 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -7,10 +7,10 @@ import ( "os" "syscall" - . "github.com/Azure/go-ansiterm/winterm" + "github.com/Azure/go-ansiterm/winterm" ) -// ConsoleStreams, for each standard stream referencing a console, returns a wrapped version +// ConsoleStreams returns a wrapped version for each standard stream referencing a console, // that handles ANSI character sequences. func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { if IsConsole(os.Stdin.Fd()) { @@ -56,6 +56,6 @@ func GetHandleInfo(in interface{}) (uintptr, bool) { // IsConsole returns true if the given file descriptor is a Windows Console. // The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. func IsConsole(fd uintptr) bool { - _, e := GetConsoleMode(fd) + _, e := winterm.GetConsoleMode(fd) return e == nil } diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go b/vendor/github.com/docker/docker/pkg/tlsconfig/config.go index 88f768ae..e3dfad1f 100644 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/config.go @@ -47,8 +47,9 @@ var clientCipherSuites = []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, } -// For use by code which already has a crypto/tls options struct but wants to -// use a commonly accepted set of TLS cipher suites, with known weak algorithms removed +// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls +// options struct but wants to use a commonly accepted set of TLS cipher suites, with +// known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) // ServerDefault is a secure-enough TLS configuration for the server TLS configuration. 
@@ -72,10 +73,10 @@ func certPool(caFile string) (*x509.CertPool, error) { certPool := x509.NewCertPool() pem, err := ioutil.ReadFile(caFile) if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %s: %v", caFile, err) + return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) } if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %s", caFile) + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } s := certPool.Subjects() subjects := make([]string, len(s)) @@ -116,9 +117,9 @@ func Server(options Options) (*tls.Config, error) { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } - return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven { diff --git a/vendor/github.com/docker/docker/pkg/units/size.go b/vendor/github.com/docker/docker/pkg/units/size.go index 2fde3b41..3b59daff 100644 --- a/vendor/github.com/docker/docker/pkg/units/size.go +++ b/vendor/github.com/docker/docker/pkg/units/size.go @@ -49,7 +49,7 @@ func CustomSize(format string, size float64, base float64, _map []string) string } // HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB"). +// capped at 4 significant digits (eg. "2.746 MB", "796 KB").
func HumanSize(size float64) string { return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) } diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index 95f73129..e8f2287e 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -8,7 +8,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" "github.com/docker/docker/image" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" @@ -23,23 +23,19 @@ type Options struct { const ( // DefaultNamespace is the default namespace DefaultNamespace = "docker.io" - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = "https://registry-1.docker.io" // DefaultRegistryVersionHeader is the name of the default HTTP header // that carries Registry version info DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - // DefaultV1Registry is the URI of the default v1 registry - DefaultV1Registry = "https://index.docker.io" - - // CertsDir is the directory where certificates are stored - CertsDir = "/etc/docker/certs.d" // IndexServer is the v1 registry server used for user auth + account creation IndexServer = DefaultV1Registry + "/v1/" // IndexName is the name of the index IndexName = "docker.io" + // NotaryServer is the endpoint serving the Notary trust server NotaryServer = "https://notary.docker.io" + + // IndexServer = "https://registry-stage.hub.docker.com/v1/" ) var ( @@ -48,6 +44,10 @@ var ( ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") emptyServiceConfig = NewServiceConfig(nil) + + // V2Only controls access to legacy registries. 
If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only = false ) // InstallFlags adds command-line options to the top-level flag parser for @@ -57,6 +57,7 @@ func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) str cmd.Var(&options.Mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) options.InsecureRegistries = opts.NewListOpts(ValidateIndexName) cmd.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) + cmd.BoolVar(&V2Only, []string{"-disable-legacy-registry"}, false, "Do not contact legacy registries") } type netIPNet net.IPNet @@ -225,7 +226,8 @@ func validateRemoteName(remoteName string) error { } } - return v2.ValidateRepositoryName(remoteName) + _, err := reference.WithName(remoteName) + return err } func validateNoSchema(reposName string) error { @@ -299,14 +301,17 @@ func splitReposName(reposName string) (string, string) { } // NewRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func (config *ServiceConfig) NewRepositoryInfo(reposName string) (*RepositoryInfo, error) { +func (config *ServiceConfig) NewRepositoryInfo(reposName string, bySearch bool) (*RepositoryInfo, error) { if err := validateNoSchema(reposName); err != nil { return nil, err } indexName, remoteName := splitReposName(reposName) - if err := validateRemoteName(remoteName); err != nil { - return nil, err + + if !bySearch { + if err := validateRemoteName(remoteName); err != nil { + return nil, err + } } repoInfo := &RepositoryInfo{ @@ -358,7 +363,18 @@ func (repoInfo *RepositoryInfo) GetSearchTerm() string { // ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but // lacks registry configuration. func ParseRepositoryInfo(reposName string) (*RepositoryInfo, error) { - return emptyServiceConfig.NewRepositoryInfo(reposName) + return emptyServiceConfig.NewRepositoryInfo(reposName, false) +} + +// ParseIndexInfo will use repository name to get back an indexInfo. +func ParseIndexInfo(reposName string) (*IndexInfo, error) { + indexName, _ := splitReposName(reposName) + + indexInfo, err := emptyServiceConfig.NewIndexInfo(indexName) + if err != nil { + return nil, err + } + return indexInfo, nil } // NormalizeLocalName transforms a repository name into a normalize LocalName diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go new file mode 100644 index 00000000..32f167d0 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +package registry + +const ( + // DefaultV1Registry is the URI of the default v1 registry + DefaultV1Registry = "https://index.docker.io" + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = "https://registry-1.docker.io" + + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" +) + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. 
Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go new file mode 100644 index 00000000..d01b2618 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_windows.go @@ -0,0 +1,30 @@ +package registry + +import ( + "os" + "path/filepath" + "strings" +) + +const ( + // DefaultV1Registry is the URI of the default v1 registry + DefaultV1Registry = "https://registry-win-tp3.docker.io" + + // DefaultV2Registry is the URI of the default (official) v2 registry. + // This is the windows-specific endpoint. + // + // Currently it is a TEMPORARY link that allows Microsoft to continue + // development of Docker Engine for Windows. + DefaultV2Registry = "https://registry-win-tp3.docker.io" +) + +// CertsDir is the directory where certificates are stored +var CertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} diff --git a/vendor/github.com/docker/docker/registry/endpoint.go b/vendor/github.com/docker/docker/registry/endpoint.go index b7aaedaa..20805767 100644 --- a/vendor/github.com/docker/docker/registry/endpoint.go +++ b/vendor/github.com/docker/docker/registry/endpoint.go @@ -42,8 +42,9 @@ func scanForAPIVersion(address string) (string, APIVersion) { return address, APIVersionUnknown } -// NewEndpoint parses the given address to return a registry endpoint. -func NewEndpoint(index *IndexInfo, metaHeaders http.Header) (*Endpoint, error) { +// NewEndpoint parses the given address to return a registry endpoint. v can be used to +// request a specific endpoint version. +func NewEndpoint(index *IndexInfo, metaHeaders http.Header, v APIVersion) (*Endpoint, error) { tlsConfig, err := newTLSConfig(index.Name, index.Secure) if err != nil { return nil, err @@ -52,6 +53,9 @@ func NewEndpoint(index *IndexInfo, metaHeaders http.Header) (*Endpoint, error) { if err != nil { return nil, err } + if v != APIVersionUnknown { + endpoint.Version = v + } if err := validateEndpoint(endpoint); err != nil { return nil, err } @@ -111,11 +115,6 @@ func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) return endpoint, nil } -// GetEndpoint returns a new endpoint with the specified headers -func (repoInfo *RepositoryInfo) GetEndpoint(metaHeaders http.Header) (*Endpoint, error) { - return NewEndpoint(repoInfo.Index, metaHeaders) -} - // Endpoint stores basic information about a registry endpoint. type Endpoint struct { client *http.Client diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index 74f731bd..e8eb4785 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -1,3 +1,4 @@ +// Package registry contains client primitives to interact with a remote Docker registry.
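To make the platform split above concrete, this standalone re-implementation (not the vendored function itself) shows what the Windows variant of cleanPath does to a registry-derived directory name:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // cleanPathWindows mirrors the Windows cleanPath shown above: drop the
    // colon, which is illegal in Windows directory names, and flip slashes.
    func cleanPathWindows(s string) string {
        return filepath.FromSlash(strings.Replace(s, ":", "", -1))
    }

    func main() {
        fmt.Println(cleanPathWindows("https:/index.docker.io/v1"))
        // On Windows this yields `https\index.docker.io\v1`.
    }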
package registry import ( @@ -17,8 +18,9 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/tlsconfig" "github.com/docker/docker/pkg/useragent" @@ -37,16 +39,20 @@ var dockerUserAgent string func init() { httpVersion := make([]useragent.VersionInfo, 0, 6) - httpVersion = append(httpVersion, useragent.VersionInfo{"docker", dockerversion.VERSION}) - httpVersion = append(httpVersion, useragent.VersionInfo{"go", runtime.Version()}) - httpVersion = append(httpVersion, useragent.VersionInfo{"git-commit", dockerversion.GITCOMMIT}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: dockerversion.Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: dockerversion.GitCommit}) if kernelVersion, err := kernel.GetKernelVersion(); err == nil { - httpVersion = append(httpVersion, useragent.VersionInfo{"kernel", kernelVersion.String()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) } - httpVersion = append(httpVersion, useragent.VersionInfo{"os", runtime.GOOS}) - httpVersion = append(httpVersion, useragent.VersionInfo{"arch", runtime.GOARCH}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) dockerUserAgent = useragent.AppendVersions("", httpVersion...) + + if runtime.GOOS != "linux" { + V2Only = true + } } func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { @@ -56,7 +62,7 @@ func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { tlsConfig.InsecureSkipVerify = !isSecure if isSecure { - hostDir := filepath.Join(CertsDir, hostname) + hostDir := filepath.Join(CertsDir, cleanPath(hostname)) logrus.Debugf("hostDir: %s", hostDir) if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil { return nil, err @@ -184,7 +190,7 @@ func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Reque func shouldV2Fallback(err errcode.Error) bool { logrus.Debugf("v2 error: %T %v", err, err) switch err.Code { - case v2.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown: + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown: return true } return false @@ -211,8 +217,14 @@ func ContinueOnError(err error) bool { return ContinueOnError(v.Err) case errcode.Error: return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true } - return false + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true } // NewTransport returns a new HTTP transport. 
If tlsConfig is nil, it uses the diff --git a/vendor/github.com/docker/docker/registry/registry_test.go b/vendor/github.com/docker/docker/registry/registry_test.go index 88b08dff..7714310d 100644 --- a/vendor/github.com/docker/docker/registry/registry_test.go +++ b/vendor/github.com/docker/docker/registry/registry_test.go @@ -23,7 +23,7 @@ const ( func spawnTestRegistrySession(t *testing.T) *Session { authConfig := &cliconfig.AuthConfig{} - endpoint, err := NewEndpoint(makeIndex("/v1/"), nil) + endpoint, err := NewEndpoint(makeIndex("/v1/"), nil, APIVersionUnknown) if err != nil { t.Fatal(err) } @@ -50,7 +50,7 @@ func spawnTestRegistrySession(t *testing.T) *Session { func TestPingRegistryEndpoint(t *testing.T) { testPing := func(index *IndexInfo, expectedStandalone bool, assertMessage string) { - ep, err := NewEndpoint(index, nil) + ep, err := NewEndpoint(index, nil, APIVersionUnknown) if err != nil { t.Fatal(err) } @@ -70,7 +70,7 @@ func TestPingRegistryEndpoint(t *testing.T) { func TestEndpoint(t *testing.T) { // Simple wrapper to fail test if err != nil expandEndpoint := func(index *IndexInfo) *Endpoint { - endpoint, err := NewEndpoint(index, nil) + endpoint, err := NewEndpoint(index, nil, APIVersionUnknown) if err != nil { t.Fatal(err) } @@ -79,7 +79,7 @@ func TestEndpoint(t *testing.T) { assertInsecureIndex := func(index *IndexInfo) { index.Secure = true - _, err := NewEndpoint(index, nil) + _, err := NewEndpoint(index, nil, APIVersionUnknown) assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") index.Secure = false @@ -87,7 +87,7 @@ func TestEndpoint(t *testing.T) { assertSecureIndex := func(index *IndexInfo) { index.Secure = true - _, err := NewEndpoint(index, nil) + _, err := NewEndpoint(index, nil, APIVersionUnknown) assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") index.Secure = false @@ -153,7 +153,7 @@ func TestEndpoint(t *testing.T) { } for _, address := range badEndpoints { index.Name = address - _, err := NewEndpoint(index, nil) + _, err := NewEndpoint(index, nil, APIVersionUnknown) checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") } } @@ -185,7 +185,7 @@ func TestGetRemoteImageJSON(t *testing.T) { if err != nil { t.Fatal(err) } - assertEqual(t, size, 154, "Expected size 154") + assertEqual(t, size, int64(154), "Expected size 154") if len(json) <= 0 { t.Fatal("Expected non-empty json") } @@ -677,6 +677,35 @@ func TestNewIndexInfo(t *testing.T) { testIndexInfo(config, expectedIndexInfos) } +func TestMirrorEndpointLookup(t *testing.T) { + containsMirror := func(endpoints []APIEndpoint) bool { + for _, pe := range endpoints { + if pe.URL == "my.mirror" { + return true + } + } + return false + } + s := Service{Config: makeServiceConfig([]string{"my.mirror"}, nil)} + imageName := IndexName + "/test/image" + + pushAPIEndpoints, err := s.LookupPushEndpoints(imageName) + if err != nil { + t.Fatal(err) + } + if containsMirror(pushAPIEndpoints) { + t.Fatal("Push endpoint should not contain mirror") + } + + pullAPIEndpoints, err := s.LookupPullEndpoints(imageName) + if err != nil { + t.Fatal(err) + } + if !containsMirror(pullAPIEndpoints) { + t.Fatal("Pull endpoint should contain mirror") + } +} + func 
TestPushRegistryTag(t *testing.T) { r := spawnTestRegistrySession(t) err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/")) @@ -738,12 +767,18 @@ func TestValidRemoteName(t *testing.T) { // Allow embedded hyphens. "docker-rules/docker", + // Allow multiple hyphens as well. + "docker---rules/docker", + //Username doc and image name docker being tested. "doc/docker", // single character names are now allowed. "d/docker", "jess/t", + + // Consecutive underscores. + "dock__er/docker", } for _, repositoryName := range validRepositoryNames { if err := validateRemoteName(repositoryName); err != nil { @@ -771,8 +806,10 @@ func TestValidRemoteName(t *testing.T) { "_docker/_docker", - // Disallow consecutive hyphens. - "dock--er/docker", + // Disallow consecutive periods. + "dock..er/docker", + "dock_.er/docker", + "dock-.er/docker", // No repository. "docker/", diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go index f4ea42ef..6ac930d6 100644 --- a/vendor/github.com/docker/docker/registry/service.go +++ b/vendor/github.com/docker/docker/registry/service.go @@ -2,14 +2,11 @@ package registry import ( "crypto/tls" - "fmt" "net/http" "net/url" - "strings" "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/tlsconfig" ) // Service is a registry service. It tracks configuration data such as a list @@ -27,7 +24,7 @@ func NewService(options *Options) *Service { } // Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was sucessful. +// and returns OK if authentication was successful. // It can be used to verify the validity of a client's credentials. func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { addr := authConfig.ServerAddress @@ -39,7 +36,14 @@ func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { if err != nil { return "", err } - endpoint, err := NewEndpoint(index, nil) + + endpointVersion := APIVersion(APIVersionUnknown) + if V2Only { + // Override the endpoint to only attempt a v2 ping + endpointVersion = APIVersion2 + } + + endpoint, err := NewEndpoint(index, nil, endpointVersion) if err != nil { return "", err } @@ -50,16 +54,18 @@ func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { // Search queries the public registry for images matching the specified // search terms, and returns the results. func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers map[string][]string) (*SearchResults, error) { - repoInfo, err := s.ResolveRepository(term) + + repoInfo, err := s.ResolveRepositoryBySearch(term) if err != nil { return nil, err } // *TODO: Search multiple indexes. - endpoint, err := repoInfo.GetEndpoint(http.Header(headers)) + endpoint, err := NewEndpoint(repoInfo.Index, http.Header(headers), APIVersionUnknown) if err != nil { return nil, err } + r, err := NewSession(endpoint.client, authConfig, endpoint) if err != nil { return nil, err @@ -70,7 +76,13 @@ func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers // ResolveRepository splits a repository name into its components // and configuration of the associated registry. 
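A sketch of the new version-pinning call pattern, written in the style of the package's own tests (it only compiles inside this registry package, and the helper name is invented):

    package registry

    import "net/http"

    // newV2OnlyEndpoint demonstrates the extra APIVersion argument introduced
    // above: APIVersion2 restricts the ping to the v2 protocol, while
    // APIVersionUnknown keeps the old auto-detection behaviour.
    func newV2OnlyEndpoint(index *IndexInfo, headers http.Header) (*Endpoint, error) {
        return NewEndpoint(index, headers, APIVersion2)
    }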
func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) { - return s.Config.NewRepositoryInfo(name) + return s.Config.NewRepositoryInfo(name, false) +} + +// ResolveRepositoryBySearch splits a repository name into its components +// and configuration of the associated registry. +func (s *Service) ResolveRepositoryBySearch(name string) (*RepositoryInfo, error) { + return s.Config.NewRepositoryInfo(name, true) } // ResolveIndex takes indexName and returns index info @@ -108,99 +120,43 @@ func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) { return s.TLSConfig(mirrorURL.Host) } -// LookupEndpoints creates an list of endpoints to try, in order of preference. +// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. // It gives preference to v2 endpoints over v1, mirrors over the actual // registry, and HTTPS over plain HTTP. -func (s *Service) LookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) { - var cfg = tlsconfig.ServerDefault - tlsConfig := &cfg - if strings.HasPrefix(repoName, DefaultNamespace+"/") { - // v2 mirrors - for _, mirror := range s.Config.Mirrors { - mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) - if err != nil { - return nil, err +func (s *Service) LookupPullEndpoints(repoName string) (endpoints []APIEndpoint, err error) { + return s.lookupEndpoints(repoName) +} + +// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. +// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. +// Mirrors are not included. +func (s *Service) LookupPushEndpoints(repoName string) (endpoints []APIEndpoint, err error) { + allEndpoints, err := s.lookupEndpoints(repoName) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) } - endpoints = append(endpoints, APIEndpoint{ - URL: mirror, - // guess mirrors are v2 - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) } - // v2 registry - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - // v1 registry - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV1Registry, - Version: APIVersion1, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - return endpoints, nil } + return endpoints, err +} - slashIndex := strings.IndexRune(repoName, '/') - if slashIndex <= 0 { - return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) - } - hostname := repoName[:slashIndex] - - tlsConfig, err = s.TLSConfig(hostname) +func (s *Service) lookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(repoName) if err != nil { return nil, err } - isSecure := !tlsConfig.InsecureSkipVerify - v2Versions := []auth.APIVersion{ - { - Type: "registry", - Version: "2.0", - }, - } - endpoints = []APIEndpoint{ - { - URL: "https://" + hostname, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, - VersionHeader: DefaultRegistryVersionHeader, - Versions: v2Versions, - }, - { - URL: "https://" + hostname, - Version: APIVersion1, - TrimHostname: true, - TLSConfig: tlsConfig, - }, + if V2Only { + return endpoints, nil } - if !isSecure { - endpoints = append(endpoints, APIEndpoint{ - URL: "http://" + hostname, - Version: APIVersion2, - TrimHostname: true, - // used to check if supposed to be secure via
InsecureSkipVerify - TLSConfig: tlsConfig, - VersionHeader: DefaultRegistryVersionHeader, - Versions: v2Versions, - }, APIEndpoint{ - URL: "http://" + hostname, - Version: APIVersion1, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) + legacyEndpoints, err := s.lookupV1Endpoints(repoName) + if err != nil { + return nil, err } + endpoints = append(endpoints, legacyEndpoints...) return endpoints, nil } diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go new file mode 100644 index 00000000..ddb78ee6 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v1.go @@ -0,0 +1,54 @@ +package registry + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/tlsconfig" +) + +func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, err error) { + var cfg = tlsconfig.ServerDefault + tlsConfig := &cfg + if strings.HasPrefix(repoName, DefaultNamespace+"/") { + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV1Registry, + Version: APIVersion1, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + return endpoints, nil + } + + slashIndex := strings.IndexRune(repoName, '/') + if slashIndex <= 0 { + return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) + } + hostname := repoName[:slashIndex] + + tlsConfig, err = s.TLSConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: "https://" + hostname, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ // or this + URL: "http://" + hostname, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go new file mode 100644 index 00000000..70d5fd71 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -0,0 +1,83 @@ +package registry + +import ( + "fmt" + "strings" + + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/pkg/tlsconfig" +) + +func (s *Service) lookupV2Endpoints(repoName string) (endpoints []APIEndpoint, err error) { + var cfg = tlsconfig.ServerDefault + tlsConfig := &cfg + if strings.HasPrefix(repoName, DefaultNamespace+"/") { + // v2 mirrors + for _, mirror := range s.Config.Mirrors { + mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirror, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + slashIndex := strings.IndexRune(repoName, '/') + if slashIndex <= 0 { + return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) + } + hostname := repoName[:slashIndex] + + tlsConfig, err = s.TLSConfig(hostname) + if err != nil { + return nil, err + } + + v2Versions := []auth.APIVersion{ + { + Type: "registry", + Version: "2.0", + }, + } + endpoints = []APIEndpoint{ + { + URL: "https://" + hostname, + Version: 
APIVersion2, + TrimHostname: true, + TLSConfig: tlsConfig, + VersionHeader: DefaultRegistryVersionHeader, + Versions: v2Versions, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: "http://" + hostname, + Version: APIVersion2, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + VersionHeader: DefaultRegistryVersionHeader, + Versions: v2Versions, + }) + } + + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go index 9bec7c1b..2a20d321 100644 --- a/vendor/github.com/docker/docker/registry/session.go +++ b/vendor/github.com/docker/docker/registry/session.go @@ -25,6 +25,7 @@ import ( "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/utils" ) var ( @@ -240,7 +241,7 @@ func (r *Session) LookupRemoteImage(imgID, registry string) error { } // GetRemoteImageJSON retrieves an image's JSON metadata from the registry. -func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error) { +func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { res, err := r.client.Get(registry + "images/" + imgID + "/json") if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) @@ -250,9 +251,9 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } // if the size header is not present, then set it to '-1' - imageSize := -1 + imageSize := int64(-1) if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { - imageSize, err = strconv.Atoi(hdr) + imageSize, err = strconv.ParseInt(hdr, 10, 64) if err != nil { return nil, -1, err } @@ -424,7 +425,7 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { // and return a non-obtuse error message for users // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" // was a top search on the docker user forum - if strings.HasSuffix(err.Error(), "i/o timeout") { + if utils.IsTimeout(err) { return nil, fmt.Errorf("Network timed out while trying to connect to %s. 
You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) } return nil, fmt.Errorf("Error while pulling image: %v", err) diff --git a/vendor/github.com/docker/docker/runconfig/compare_test.go b/vendor/github.com/docker/docker/runconfig/compare_test.go index e59c3b2f..1a5c6c6c 100644 --- a/vendor/github.com/docker/docker/runconfig/compare_test.go +++ b/vendor/github.com/docker/docker/runconfig/compare_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/stringutils" ) // Just to make life easier @@ -32,12 +33,12 @@ func TestCompare(t *testing.T) { volumes3["/test3"] = struct{}{} envs1 := []string{"ENV1=value1", "ENV2=value2"} envs2 := []string{"ENV1=value1", "ENV3=value3"} - entrypoint1 := &Entrypoint{parts: []string{"/bin/sh", "-c"}} - entrypoint2 := &Entrypoint{parts: []string{"/bin/sh", "-d"}} - entrypoint3 := &Entrypoint{parts: []string{"/bin/sh", "-c", "echo"}} - cmd1 := &Command{parts: []string{"/bin/sh", "-c"}} - cmd2 := &Command{parts: []string{"/bin/sh", "-d"}} - cmd3 := &Command{parts: []string{"/bin/sh", "-c", "echo"}} + entrypoint1 := stringutils.NewStrSlice("/bin/sh", "-c") + entrypoint2 := stringutils.NewStrSlice("/bin/sh", "-d") + entrypoint3 := stringutils.NewStrSlice("/bin/sh", "-c", "echo") + cmd1 := stringutils.NewStrSlice("/bin/sh", "-c") + cmd2 := stringutils.NewStrSlice("/bin/sh", "-d") + cmd3 := stringutils.NewStrSlice("/bin/sh", "-c", "echo") labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go index 0312cc4c..93ddb46c 100644 --- a/vendor/github.com/docker/docker/runconfig/config.go +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ -2,198 +2,47 @@ package runconfig import ( "encoding/json" + "fmt" "io" - "strings" "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/volume" ) -// Entrypoint encapsulates the container entrypoint. -// It might be represented as a string or an array of strings. -// We need to override the json decoder to accept both options. -// The JSON decoder will fail if the api sends an string and -// we try to decode it into an array of string. -type Entrypoint struct { - parts []string -} - -// MarshalJSON Marshals (or serializes) the Entrypoint into the json format. -// This method is needed to implement json.Marshaller. -func (e *Entrypoint) MarshalJSON() ([]byte, error) { - if e == nil { - return []byte{}, nil - } - return json.Marshal(e.Slice()) -} - -// UnmarshalJSON decodes the entrypoint whether it's a string or an array of strings. -// This method is needed to implement json.Unmarshaler. -func (e *Entrypoint) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - e.parts = p - return nil -} - -// Len returns the number of parts of the Entrypoint. -func (e *Entrypoint) Len() int { - if e == nil { - return 0 - } - return len(e.parts) -} - -// Slice gets the parts of the Entrypoint as a Slice of string. 
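The Entrypoint and Command wrappers being removed here were two copies of the same idea, which now lives in stringutils.StrSlice: a string slice whose JSON encoding may be either a bare string or an array. A sketch of that round trip, assuming StrSlice keeps the flexible UnmarshalJSON deleted below:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/docker/docker/pkg/stringutils"
    )

    func main() {
        // A bare string decodes as a single part; an array decodes element-wise.
        for _, raw := range []string{`"/bin/sh -c echo"`, `["/bin/sh","-c","echo"]`} {
            var s stringutils.StrSlice
            if err := json.Unmarshal([]byte(raw), &s); err != nil {
                log.Fatal(err)
            }
            fmt.Println(s.Len(), s.Slice()) // 1 part, then 3 parts
        }
    }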
-func (e *Entrypoint) Slice() []string { - if e == nil { - return nil - } - return e.parts -} - -// NewEntrypoint creates an Entrypoint based on the specified parts (as strings). -func NewEntrypoint(parts ...string) *Entrypoint { - return &Entrypoint{parts} -} - -// Command encapsulates the container command. -// It might be represented as a string or an array of strings. -// We need to override the json decoder to accept both options. -// The JSON decoder will fail if the api sends an string and -// we try to decode it into an array of string. -type Command struct { - parts []string -} - -// ToString gets a string representing a Command. -func (e *Command) ToString() string { - return strings.Join(e.parts, " ") -} - -// MarshalJSON Marshals (or serializes) the Command into the json format. -// This method is needed to implement json.Marshaller. -func (e *Command) MarshalJSON() ([]byte, error) { - if e == nil { - return []byte{}, nil - } - return json.Marshal(e.Slice()) -} - -// UnmarshalJSON decodes the entrypoint whether it's a string or an array of strings. -// This method is needed to implement json.Unmarshaler. -func (e *Command) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - e.parts = p - return nil -} - -// Len returns the number of parts of the Entrypoint. -func (e *Command) Len() int { - if e == nil { - return 0 - } - return len(e.parts) -} - -// Slice gets the parts of the Entrypoint as a Slice of string. -func (e *Command) Slice() []string { - if e == nil { - return nil - } - return e.parts -} - -// NewCommand creates a Command based on the specified parts (as strings). -func NewCommand(parts ...string) *Command { - return &Command{parts} -} - // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container + + // Applicable to all platforms AttachStdin bool // Attach the standard input, makes possible user interaction AttachStdout bool // Attach the standard output AttachStderr bool // Attach the standard error - ExposedPorts map[nat.Port]struct{} // List of exposed ports - PublishService string // Name of the network service exposed by the container - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + Cmd *stringutils.StrSlice // Command to run when starting the container + Entrypoint *stringutils.StrSlice // Entrypoint to run when starting the container + Env []string // List of environment variable to set in the container + ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports + Hostname string // Hostname + Image string // Name of the image as it was passed by the operator (eg. 
could be symbolic) + Labels map[string]string // List of labels set to this container + MacAddress string `json:",omitempty"` // Mac Address of the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled (--net=none) + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd *Command // Command to run when starting the container - Image string // Name of the image as it was passed by the operator (eg. could be symbolic) + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. Volumes map[string]struct{} // List of volumes (mounts) used for the container - VolumeDriver string // Name of the volume driver used to mount volumes WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint *Entrypoint // Entrypoint to run when starting the container - NetworkDisabled bool // Is network disabled - MacAddress string // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container -} -// ContainerConfigWrapper is a Config wrapper that hold the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *Config - InnerHostConfig *HostConfig `json:"HostConfig,omitempty"` - Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. - *HostConfig // Deprecated. Exported to read attrubutes from json that are not in the inner host config structure. - -} - -// GetHostConfig gets the HostConfig of the Config. -// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper -func (w *ContainerConfigWrapper) GetHostConfig() *HostConfig { - hc := w.HostConfig - - if hc == nil && w.InnerHostConfig != nil { - hc = w.InnerHostConfig - } else if w.InnerHostConfig != nil { - if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { - w.InnerHostConfig.Memory = hc.Memory - } - if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { - w.InnerHostConfig.MemorySwap = hc.MemorySwap - } - if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { - w.InnerHostConfig.CPUShares = hc.CPUShares - } - - hc = w.InnerHostConfig - } - - if hc != nil && w.Cpuset != "" && hc.CpusetCpus == "" { - hc.CpusetCpus = w.Cpuset - } - - return hc + // Applicable to UNIX platforms + Domainname string // Domainname + PublishService string `json:",omitempty"` // Name of the network service exposed by the container + StopSignal string `json:",omitempty"` // Signal to stop a container + User string // User that will run the command(s) inside the container } // DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper @@ -201,12 +50,57 @@ func (w *ContainerConfigWrapper) GetHostConfig() *HostConfig { // Be aware this function is not checking whether the resulted structs are nil, // it's your business to do so func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) { - decoder := json.NewDecoder(src) - var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) if err := decoder.Decode(&w); err != nil { return nil, nil, err } - return w.Config, w.GetHostConfig(), nil + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. 
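The new comment on Config above explains why added fields must carry `json:",omitempty"`: the marshalled JSON feeds the legacy v1Compatibility hash, and a field that serialises even when empty would silently change every hash. A small standard-library illustration (struct names hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type withTag struct {
	MacAddress string `json:",omitempty"`
}

type withoutTag struct {
	MacAddress string
}

func main() {
	a, _ := json.Marshal(withTag{})
	b, _ := json.Marshal(withoutTag{})
	fmt.Println(string(a)) // {} (empty value omitted)
	fmt.Println(string(b)) // {"MacAddress":""} (would change a hash computed over the JSON)
}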
+ if w.Config != nil && hc != nil { + + // Initialise the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateVolumesAndBindSettings(w.Config, hc); err != nil { + return nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := ValidateNetMode(w.Config, hc); err != nil { + return nil, nil, err + } + + // Validate the isolation level + if err := ValidateIsolationLevel(hc); err != nil { + return nil, nil, err + } + return w.Config, hc, nil +} + +// validateVolumesAndBindSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateVolumesAndBindSettings(c *Config, hc *HostConfig) error { + + // Ensure all volumes and binds are valid. + for spec := range c.Volumes { + if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("Invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("Invalid bind mount spec %q: %v", spec, err) + } + } + + return nil } diff --git a/vendor/github.com/docker/docker/runconfig/config_test.go b/vendor/github.com/docker/docker/runconfig/config_test.go index 9efe1dff..b721cf10 100644 --- a/vendor/github.com/docker/docker/runconfig/config_test.go +++ b/vendor/github.com/docker/docker/runconfig/config_test.go @@ -5,121 +5,37 @@ import ( "encoding/json" "fmt" "io/ioutil" + "runtime" + "strings" "testing" + + "github.com/docker/docker/pkg/stringutils" ) -func TestEntrypointMarshalJSON(t *testing.T) { - entrypoints := map[*Entrypoint]string{ - nil: "", - &Entrypoint{}: "null", - &Entrypoint{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`, - } - - for entrypoint, expected := range entrypoints { - data, err := entrypoint.MarshalJSON() - if err != nil { - t.Fatal(err) - } - if string(data) != expected { - t.Fatalf("Expected %v, got %v", expected, string(data)) - } - } -} - -func TestEntrypointUnmarshalJSON(t *testing.T) { - parts := map[string][]string{ - "": {"default", "values"}, - "[]": {}, - `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, - } - for json, expectedParts := range parts { - entrypoint := &Entrypoint{ - []string{"default", "values"}, - } - if err := entrypoint.UnmarshalJSON([]byte(json)); err != nil { - t.Fatal(err) - } - - actualParts := entrypoint.Slice() - if len(actualParts) != len(expectedParts) { - t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) - } - for index, part := range actualParts { - if part != expectedParts[index] { - t.Fatalf("Expected %v, got %v", expectedParts, actualParts) - break - } - } - } -} - -func TestCommandToString(t *testing.T) { - commands := map[*Command]string{ - &Command{[]string{""}}: "", - &Command{[]string{"one"}}: "one", - &Command{[]string{"one", "two"}}: "one two", - } - for command, expected := range commands { - toString := command.ToString() - if toString != expected { - t.Fatalf("Expected %v, got %v", expected, toString) - } - } -} - -func TestCommandMarshalJSON(t *testing.T) { - commands := map[*Command]string{ - nil: "", - &Command{}: "null", - &Command{[]string{"/bin/sh", "-c", "echo"}}: `["/bin/sh","-c","echo"]`, - } - - for command, expected := range commands { - data, err := 
command.MarshalJSON() - if err != nil { - t.Fatal(err) - } - if string(data) != expected { - t.Fatalf("Expected %v, got %v", expected, string(data)) - } - } -} - -func TestCommandUnmarshalJSON(t *testing.T) { - parts := map[string][]string{ - "": {"default", "values"}, - "[]": {}, - `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, - } - for json, expectedParts := range parts { - command := &Command{ - []string{"default", "values"}, - } - if err := command.UnmarshalJSON([]byte(json)); err != nil { - t.Fatal(err) - } - - actualParts := command.Slice() - if len(actualParts) != len(expectedParts) { - t.Fatalf("Expected %v parts, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) - } - for index, part := range actualParts { - if part != expectedParts[index] { - t.Fatalf("Expected %v, got %v", expectedParts, actualParts) - break - } - } - } +type f struct { + file string + entrypoint *stringutils.StrSlice } func TestDecodeContainerConfig(t *testing.T) { - fixtures := []struct { - file string - entrypoint *Entrypoint - }{ - {"fixtures/container_config_1_14.json", NewEntrypoint()}, - {"fixtures/container_config_1_17.json", NewEntrypoint("bash")}, - {"fixtures/container_config_1_19.json", NewEntrypoint("bash")}, + + var ( + fixtures []f + image string + ) + + if runtime.GOOS != "windows" { + image = "ubuntu" + fixtures = []f{ + {"fixtures/unix/container_config_1_14.json", stringutils.NewStrSlice()}, + {"fixtures/unix/container_config_1_17.json", stringutils.NewStrSlice("bash")}, + {"fixtures/unix/container_config_1_19.json", stringutils.NewStrSlice("bash")}, + } + } else { + image = "windows" + fixtures = []f{ + {"fixtures/windows/container_config_1_19.json", stringutils.NewStrSlice("cmd")}, + } } for _, f := range fixtures { @@ -133,96 +49,71 @@ func TestDecodeContainerConfig(t *testing.T) { t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) } - if c.Image != "ubuntu" { - t.Fatalf("Expected ubuntu image, found %s\n", c.Image) + if c.Image != image { + t.Fatalf("Expected %s image, found %s\n", image, c.Image) } if c.Entrypoint.Len() != f.entrypoint.Len() { t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) } - if h.Memory != 1000 { + if h != nil && h.Memory != 1000 { t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) } } } -func TestEntrypointUnmarshalString(t *testing.T) { - var e *Entrypoint - echo, err := json.Marshal("echo") - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) +// TestDecodeContainerConfigIsolation validates the isolation level passed +// to the daemon in the hostConfig structure. Note this is platform specific +// as to what level of container isolation is supported. 
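The rewritten TestDecodeContainerConfig above switches its fixture set and expected image name on runtime.GOOS, matching the fixtures/unix and fixtures/windows split introduced later in this patch. The selection logic reduces to the following sketch (paths as in the diff):

package main

import (
	"fmt"
	"runtime"
)

// fixtureDir picks the platform-specific fixture directory, the same
// selection the updated test performs inline.
func fixtureDir() string {
	if runtime.GOOS == "windows" {
		return "fixtures/windows"
	}
	return "fixtures/unix"
}

func main() {
	fmt.Println(fixtureDir())
}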
+func TestDecodeContainerConfigIsolation(t *testing.T) { + + // An invalid isolation level + if _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { + t.Fatal(err) + } } - slice := e.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) + // Blank isolation level (== default) + if _, _, err := callDecodeContainerConfigIsolation(""); err != nil { + t.Fatal("Blank isolation should have succeeded") } - if slice[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", slice[0]) + // Default isolation level + if _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { + t.Fatal("default isolation should have succeeded") + } + + // Hyper-V Containers isolation level (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + t.Fatal("hyperv isolation should have succeeded") + } + } else { + if _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { + t.Fatal(err) + } + } } } -func TestEntrypointUnmarshalSlice(t *testing.T) { - var e *Entrypoint - echo, err := json.Marshal([]string{"echo"}) - if err != nil { - t.Fatal(err) +// callDecodeContainerConfigIsolation is a utility function to call +// DecodeContainerConfig for validating isolation levels +func callDecodeContainerConfigIsolation(isolation string) (*Config, *HostConfig, error) { + var ( + b []byte + err error + ) + w := ContainerConfigWrapper{ + Config: &Config{}, + HostConfig: &HostConfig{ + NetworkMode: "none", + Isolation: IsolationLevel(isolation)}, } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - slice := e.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) - } - - if slice[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", slice[0]) - } -} - -func TestCommandUnmarshalSlice(t *testing.T) { - var e *Command - echo, err := json.Marshal([]string{"echo"}) - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - slice := e.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) - } - - if slice[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", slice[0]) - } -} - -func TestCommandUnmarshalString(t *testing.T) { - var e *Command - echo, err := json.Marshal("echo") - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(echo, &e); err != nil { - t.Fatal(err) - } - - slice := e.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) - } - - if slice[0] != "echo" { - t.Fatalf("expected `echo`, got: %q", slice[0]) + if b, err = json.Marshal(w); err != nil { + return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) } + return DecodeContainerConfig(bytes.NewReader(b)) } diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 00000000..63bd0a2f --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,53 @@ +// +build !windows + +package runconfig + +// ContainerConfigWrapper is a Config wrapper that hold the container Config (portable) +// and the corresponding HostConfig (non-portable). 
+type ContainerConfigWrapper struct { + *Config + InnerHostConfig *HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + *HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. +// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behaviour. + hc = SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go new file mode 100644 index 00000000..2ab8e19a --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_windows.go @@ -0,0 +1,13 @@ +package runconfig + +// ContainerConfigWrapper is a Config wrapper that hold the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. +func (w *ContainerConfigWrapper) getHostConfig() *HostConfig { + return w.HostConfig +} diff --git a/vendor/github.com/docker/docker/runconfig/exec.go b/vendor/github.com/docker/docker/runconfig/exec.go index 1f13d7dd..6fe28ea3 100644 --- a/vendor/github.com/docker/docker/runconfig/exec.go +++ b/vendor/github.com/docker/docker/runconfig/exec.go @@ -24,12 +24,13 @@ type ExecConfig struct { // not valid, it will return an error. 
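The Unix ContainerConfigWrapper above keeps two routes to the same data: modern clients nest host settings under a "HostConfig" key (InnerHostConfig), while legacy clients sent them at the top level (the embedded, deprecated *HostConfig). A self-contained sketch of how encoding/json serves both shapes, trimmed to a single field with stand-in types:

package main

import (
	"encoding/json"
	"fmt"
)

type HostConfig struct {
	Memory int64
}

// Wrapper mirrors the dual layout: a tagged nested field for the modern
// shape plus an embedded struct whose fields are promoted to the top level.
type Wrapper struct {
	InnerHostConfig *HostConfig `json:"HostConfig,omitempty"`
	*HostConfig
}

func main() {
	var modern, legacy Wrapper
	json.Unmarshal([]byte(`{"HostConfig":{"Memory":1000}}`), &modern)
	json.Unmarshal([]byte(`{"Memory":1000}`), &legacy)
	fmt.Println(modern.InnerHostConfig.Memory) // 1000, via the nested key
	fmt.Println(legacy.HostConfig.Memory)      // 1000, via the promoted field
}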
func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { var ( - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") - execCmd []string - container string + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") + execCmd []string + container string ) cmd.Require(flag.Min, 2) if err := cmd.ParseFlags(args, true); err != nil { @@ -40,13 +41,12 @@ func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { execCmd = parsedArgs[1:] execConfig := &ExecConfig{ - User: *flUser, - // TODO(vishh): Expose 'Privileged' once it is supported. - // + //Privileged: job.GetenvBool("Privileged"), - Tty: *flTty, - Cmd: execCmd, - Container: container, - Detach: *flDetach, + User: *flUser, + Privileged: *flPrivileged, + Tty: *flTty, + Cmd: execCmd, + Container: container, + Detach: *flDetach, } // If -d is not set, attach to everything by default diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_14.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json similarity index 100% rename from vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_14.json rename to vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_17.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json similarity index 97% rename from vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_17.json rename to vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json index 60fc6e25..0d780877 100644 --- a/vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_17.json +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json @@ -38,6 +38,7 @@ "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], + "DnsOptions": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json similarity index 98% rename from vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_19.json rename to vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json index 9a3ce205..de49cf32 100644 --- a/vendor/github.com/docker/docker/runconfig/fixtures/container_config_1_19.json +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json @@ -42,6 +42,7 @@ "ReadonlyRootfs": false, "Dns": ["8.8.8.8"], "DnsSearch": [""], + "DnsOptions": [""], "ExtraHosts": null, "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], diff --git 
a/vendor/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_14.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json similarity index 100% rename from vendor/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_14.json rename to vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json similarity index 100% rename from vendor/github.com/docker/docker/runconfig/fixtures/container_hostconfig_1_19.json rename to vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json new file mode 100644 index 00000000..724320c7 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "cmd", + "Image": "windows", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "c:/windows": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["c:/windows:d:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "default", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go index f6a89a31..dae899c4 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/pkg/ulimit" ) @@ -18,6 +19,16 @@ type KeyValuePair struct { // NetworkMode represents the container network stack. type NetworkMode string +// IsolationLevel represents the isolation level of a container. The supported +// values are platform specific +type IsolationLevel string + +// IsDefault indicates the default isolation level of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i IsolationLevel) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + // IpcMode represents the container ipc stack. 
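IsolationLevel's IsDefault, added above, treats both the empty string and any casing of "default" as the default level; the platform-specific files later in this patch build their IsValid checks on top of it. A behaviour check, with the type and method copied in shape from the hunk above:

package main

import (
	"fmt"
	"strings"
)

type IsolationLevel string

// IsDefault matches the vendored helper: blank or any-cased "default".
func (i IsolationLevel) IsDefault() bool {
	return strings.ToLower(string(i)) == "default" || string(i) == ""
}

func main() {
	for _, v := range []IsolationLevel{"", "default", "DEFAULT", "hyperv"} {
		fmt.Printf("%q -> %v\n", v, v.IsDefault())
	}
	// "" -> true, "default" -> true, "DEFAULT" -> true, "hyperv" -> false
}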
type IpcMode string @@ -140,172 +151,68 @@ func (rp *RestartPolicy) IsOnFailure() bool { return rp.Name == "on-failure" } +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + // LogConfig represents the logging configuration of the container. type LogConfig struct { Type string Config map[string]string } -// LxcConfig represents the specific LXC configuration of the container. -type LxcConfig struct { - values []KeyValuePair -} - -// MarshalJSON marshals (or serializes) the LxcConfig into JSON. -func (c *LxcConfig) MarshalJSON() ([]byte, error) { - if c == nil { - return []byte{}, nil - } - return json.Marshal(c.Slice()) -} - -// UnmarshalJSON unmarshals (or deserializes) the specified byte slices from JSON to -// a LxcConfig. -func (c *LxcConfig) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } - - var kv []KeyValuePair - if err := json.Unmarshal(b, &kv); err != nil { - var h map[string]string - if err := json.Unmarshal(b, &h); err != nil { - return err - } - for k, v := range h { - kv = append(kv, KeyValuePair{k, v}) - } - } - c.values = kv - - return nil -} - -// Len returns the number of specific lxc configuration. -func (c *LxcConfig) Len() int { - if c == nil { - return 0 - } - return len(c.values) -} - -// Slice returns the specific lxc configuration into a slice of KeyValuePair. -func (c *LxcConfig) Slice() []KeyValuePair { - if c == nil { - return nil - } - return c.values -} - -// NewLxcConfig creates a LxcConfig from the specified slice of KeyValuePair. -func NewLxcConfig(values []KeyValuePair) *LxcConfig { - return &LxcConfig{values} -} - -// CapList represents the list of capabilities of the container. -type CapList struct { - caps []string -} - -// MarshalJSON marshals (or serializes) the CapList into JSON. -func (c *CapList) MarshalJSON() ([]byte, error) { - if c == nil { - return []byte{}, nil - } - return json.Marshal(c.Slice()) -} - -// UnmarshalJSON unmarshals (or deserializes) the specified byte slices -// from JSON to a CapList. -func (c *CapList) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } - - var caps []string - if err := json.Unmarshal(b, &caps); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - caps = append(caps, s) - } - c.caps = caps - - return nil -} - -// Len returns the number of specific kernel capabilities. -func (c *CapList) Len() int { - if c == nil { - return 0 - } - return len(c.caps) -} - -// Slice returns the specific capabilities into a slice of KeyValuePair. -func (c *CapList) Slice() []string { - if c == nil { - return nil - } - return c.caps -} - -// NewCapList creates a CapList from a slice of string. -func NewCapList(caps []string) *CapList { - return &CapList{caps} -} - // HostConfig the non-portable Config structure of a container. // Here, "non-portable" means "dependent of the host we are running on". // Portable information *should* appear in Config. 
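IsUnlessStopped above introduces the third restart policy name; the parse.go hunk near the end of this patch extends ParseRestartPolicy so that "unless-stopped", like "always", rejects a retry count while "on-failure" accepts one. A sketch of that name[:max-retry] convention (helper name hypothetical, logic mirroring the hunk):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type RestartPolicy struct {
	Name              string
	MaximumRetryCount int
}

// parseRestartPolicy splits the --restart value on ":" and enforces which
// policies may carry a maximum retry count.
func parseRestartPolicy(policy string) (RestartPolicy, error) {
	var p RestartPolicy
	if policy == "" {
		return p, nil
	}
	parts := strings.Split(policy, ":")
	p.Name = parts[0]
	switch p.Name {
	case "always", "unless-stopped":
		if len(parts) > 1 {
			return p, fmt.Errorf("maximum restart count not valid with restart policy of %q", p.Name)
		}
	case "no":
		// no options to parse
	case "on-failure":
		if len(parts) > 1 {
			count, err := strconv.Atoi(parts[1])
			if err != nil {
				return p, err
			}
			p.MaximumRetryCount = count
		}
	default:
		return p, fmt.Errorf("invalid restart policy %s", policy)
	}
	return p, nil
}

func main() {
	fmt.Println(parseRestartPolicy("unless-stopped")) // {unless-stopped 0} <nil>
	fmt.Println(parseRestartPolicy("on-failure:5"))   // {on-failure 5} <nil>
}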
type HostConfig struct { - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LxcConf *LxcConfig // Additional lxc configuration - Memory int64 // Memory limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - BlkioWeight int64 // Block IO weight (relative weight vs. other containers) - OomKillDisable bool // Whether to disable OOM Killer or not - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - Privileged bool // Is the container in privileged mode - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - Links []string // List of links (in the name:alias form) - PublishAllPorts bool // Should docker publish all exposed port for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - VolumesFrom []string // List of volumes to take from other container - Devices []DeviceMapping // List of devices to map inside the container - NetworkMode NetworkMode // Network namespace to use for the container - IpcMode IpcMode // IPC namespace to use for the container - PidMode PidMode // PID namespace to use for the container - UTSMode UTSMode // UTS namespace to use for the container - CapAdd *CapList // List of kernel capabilities to add to the container - CapDrop *CapList // List of kernel capabilities to remove from the container - GroupAdd []string // List of additional groups that the container process will run as - RestartPolicy RestartPolicy // Restart policy to be used for the container - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - ReadonlyRootfs bool // Is the container root filesystem in read-only - Ulimits []*ulimit.Ulimit // List of ulimits to be set in the container - LogConfig LogConfig // Configuration of the logs for this container - CgroupParent string // Parent cgroup. - ConsoleSize [2]int // Initial console size on Windows -} + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container -// MergeConfigs merges the specified container Config and HostConfig. -// It creates a ContainerConfigWrapper. 
-func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { - return &ContainerConfigWrapper{ - config, - hostConfig, - "", nil, - } + // Applicable to UNIX platforms + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + CapAdd *stringutils.StrSlice // List of kernel capabilities to add to the container + CapDrop *stringutils.StrSlice // List of kernel capabilities to remove from the container + CgroupParent string // Parent cgroup. + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + KernelMemory int64 // Kernel memory limit (in bytes) + Links []string // List of links (in the name:alias form) + Memory int64 // Memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable bool // Whether to disable OOM Killer or not + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + Ulimits []*ulimit.Ulimit // List of ulimits to be set in the container + UTSMode UTSMode // UTS namespace to use for the container + + // Applicable to Windows + ConsoleSize [2]int // Initial console size + Isolation IsolationLevel // Isolation level of the container (eg default, hyperv) } // DecodeHostConfig creates a HostConfig based on the specified Reader. @@ -318,7 +225,19 @@ func DecodeHostConfig(src io.Reader) (*HostConfig, error) { return nil, err } - hc := w.GetHostConfig() - + hc := w.getHostConfig() return hc, nil } + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *HostConfig) *HostConfig { + if hc != nil { + if hc.NetworkMode == NetworkMode("") { + hc.NetworkMode = NetworkMode("default") + } + } + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go index 7c0befc7..9fe3fa78 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go @@ -1,13 +1,15 @@ +// +build !windows + package runconfig import ( "bytes" - "encoding/json" "fmt" "io/ioutil" "testing" ) +// TODO Windows: This will need addressing for a Windows daemon. 
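SetDefaultNetModeIfBlank above exists because network-mode validation moved from the CLI to the daemon: an older client may POST a blank NetworkMode, and the daemon now normalises it instead of rejecting the request. Reduced to its effect, with stand-in types:

package main

import "fmt"

type NetworkMode string

type HostConfig struct {
	NetworkMode NetworkMode
}

// setDefaultNetModeIfBlank mirrors the vendored helper: a nil-safe
// normalisation of a blank network mode to "default".
func setDefaultNetModeIfBlank(hc *HostConfig) *HostConfig {
	if hc != nil && hc.NetworkMode == NetworkMode("") {
		hc.NetworkMode = NetworkMode("default")
	}
	return hc
}

func main() {
	fmt.Println(setDefaultNetModeIfBlank(&HostConfig{}).NetworkMode)                    // default
	fmt.Println(setDefaultNetModeIfBlank(&HostConfig{NetworkMode: "host"}).NetworkMode) // host
}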
func TestNetworkModeTest(t *testing.T) { networkModes := map[NetworkMode][]bool{ // private, bridge, host, container, none, default @@ -22,7 +24,7 @@ func TestNetworkModeTest(t *testing.T) { } networkModeNames := map[NetworkMode]string{ "": "", - "something:weird": "", + "something:weird": "something:weird", "bridge": "bridge", DefaultDaemonNetworkMode(): "bridge", "host": "host", @@ -160,53 +162,6 @@ func TestRestartPolicy(t *testing.T) { } } -func TestLxcConfigMarshalJSON(t *testing.T) { - lxcConfigs := map[*LxcConfig]string{ - nil: "", - &LxcConfig{}: "null", - &LxcConfig{ - []KeyValuePair{{"key1", "value1"}}, - }: `[{"Key":"key1","Value":"value1"}]`, - } - - for lxcconfig, expected := range lxcConfigs { - data, err := lxcconfig.MarshalJSON() - if err != nil { - t.Fatal(err) - } - if string(data) != expected { - t.Fatalf("Expected %v, got %v", expected, string(data)) - } - } -} - -func TestLxcConfigUnmarshalJSON(t *testing.T) { - keyvaluePairs := map[string][]KeyValuePair{ - "": {{"key1", "value1"}}, - "[]": {}, - `[{"Key":"key2","Value":"value2"}]`: {{"key2", "value2"}}, - } - for json, expectedParts := range keyvaluePairs { - lxcConfig := &LxcConfig{ - []KeyValuePair{{"key1", "value1"}}, - } - if err := lxcConfig.UnmarshalJSON([]byte(json)); err != nil { - t.Fatal(err) - } - - actualParts := lxcConfig.Slice() - if len(actualParts) != len(expectedParts) { - t.Fatalf("Expected %v keyvaluePairs, got %v (%v)", len(expectedParts), len(actualParts), expectedParts) - } - for index, part := range actualParts { - if part != expectedParts[index] { - t.Fatalf("Expected %v, got %v", expectedParts, actualParts) - break - } - } - } -} - func TestMergeConfigs(t *testing.T) { expectedHostname := "hostname" expectedContainerIDFile := "containerIdFile" @@ -232,8 +187,8 @@ func TestDecodeHostConfig(t *testing.T) { fixtures := []struct { file string }{ - {"fixtures/container_hostconfig_1_14.json"}, - {"fixtures/container_hostconfig_1_19.json"}, + {"fixtures/unix/container_hostconfig_1_14.json"}, + {"fixtures/unix/container_hostconfig_1_19.json"}, } for _, f := range fixtures { @@ -264,40 +219,3 @@ func TestDecodeHostConfig(t *testing.T) { } } } - -func TestCapListUnmarshalSliceAndString(t *testing.T) { - var cl *CapList - cap0, err := json.Marshal([]string{"CAP_SOMETHING"}) - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(cap0, &cl); err != nil { - t.Fatal(err) - } - - slice := cl.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) - } - - if slice[0] != "CAP_SOMETHING" { - t.Fatalf("expected `CAP_SOMETHING`, got: %q", slice[0]) - } - - cap1, err := json.Marshal("CAP_SOMETHING") - if err != nil { - t.Fatal(err) - } - if err := json.Unmarshal(cap1, &cl); err != nil { - t.Fatal(err) - } - - slice = cl.Slice() - if len(slice) != 1 { - t.Fatalf("expected 1 element after unmarshal: %q", slice) - } - - if slice[0] != "CAP_SOMETHING" { - t.Fatalf("expected `CAP_SOMETHING`, got: %q", slice[0]) - } -} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go index 5239cb7d..7aad5a33 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -3,9 +3,16 @@ package runconfig import ( + "fmt" + "runtime" "strings" ) +// IsValid indicates is an isolation level is valid +func (i IsolationLevel) IsValid() bool { + return i.IsDefault() +} + // IsPrivate indicates whether container uses it's private network 
stack. func (n NetworkMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) @@ -34,6 +41,8 @@ func (n NetworkMode) NetworkName() string { return "none" } else if n.IsDefault() { return "default" + } else if n.IsUserDefined() { + return n.UserDefined() } return "" } @@ -58,3 +67,107 @@ func (n NetworkMode) IsContainer() bool { func (n NetworkMode) IsNone() bool { return n == "none" } + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// MergeConfigs merges the specified container Config and HostConfig. +// It creates a ContainerConfigWrapper. +func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { + return &ContainerConfigWrapper{ + config, + hostConfig, + "", nil, + } +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *Config, hc *HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("--net: invalid net mode: invalid container format container:") + } + } + + if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsUserDefined() && len(hc.Links) > 0 { + return ErrConflictUserDefinedNetworkAndLinks + } + + if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} + +// ValidateIsolationLevel performs platform specific validation of the +// isolation level in the hostconfig structure. 
Linux only supports "default" +// which is LXC container isolation +func ValidateIsolationLevel(hc *HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go index a4c0297b..dbdb1683 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -1,10 +1,26 @@ package runconfig +import ( + "fmt" + "strings" +) + // IsDefault indicates whether container uses the default network stack. func (n NetworkMode) IsDefault() bool { return n == "default" } +// IsHyperV indicates the use of Hyper-V Containers for isolation (as opposed +// to Windows Server Containers +func (i IsolationLevel) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsValid indicates is an isolation level is valid +func (i IsolationLevel) IsValid() bool { + return i.IsDefault() || i.IsHyperV() +} + // DefaultDaemonNetworkMode returns the default network stack the daemon should // use. func DefaultDaemonNetworkMode() NetworkMode { @@ -18,3 +34,48 @@ func (n NetworkMode) NetworkName() string { } return "" } + +// MergeConfigs merges the specified container Config and HostConfig. +// It creates a ContainerConfigWrapper. +func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { + return &ContainerConfigWrapper{ + config, + hostConfig, + } +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *Config, hc *HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + switch mode := parts[0]; mode { + case "default", "none": + default: + return fmt.Errorf("invalid --net: %s", hc.NetworkMode) + } + return nil +} + +// ValidateIsolationLevel performs platform specific validation of the +// isolation level in the hostconfig structure. Windows supports 'default' (or +// blank), and 'hyperv'. These refer to Windows Server Containers and +// Hyper-V Containers respectively. +func ValidateIsolationLevel(hc *HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q. 
Windows supports 'default' (Windows Server Container) or 'hyperv' (Hyper-V Container)", hc.Isolation) + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/merge.go b/vendor/github.com/docker/docker/runconfig/merge.go index 9a020a88..0106c7ea 100644 --- a/vendor/github.com/docker/docker/runconfig/merge.go +++ b/vendor/github.com/docker/docker/runconfig/merge.go @@ -47,6 +47,16 @@ func Merge(userConf, imageConf *Config) error { } } + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + if imageConf.Labels != nil { + for l := range userConf.Labels { + imageConf.Labels[l] = userConf.Labels[l] + } + userConf.Labels = imageConf.Labels + } + if userConf.Entrypoint.Len() == 0 { if userConf.Cmd.Len() == 0 { userConf.Cmd = imageConf.Cmd diff --git a/vendor/github.com/docker/docker/runconfig/parse.go b/vendor/github.com/docker/docker/runconfig/parse.go index 5528d7af..09899e7f 100644 --- a/vendor/github.com/docker/docker/runconfig/parse.go +++ b/vendor/github.com/docker/docker/runconfig/parse.go @@ -9,12 +9,21 @@ import ( flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/pkg/units" + "github.com/docker/docker/volume" ) var ( // ErrConflictContainerNetworkAndLinks conflict between --net=container and links ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior") + // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links + ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: --net= can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in --none mode") // ErrConflictNetworkAndDNS conflict between --dns and the network mode ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: --dns and the network mode (--net)") // ErrConflictNetworkHostname conflict between the hostname and the network mode @@ -31,20 +40,6 @@ var ( ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: --expose and the network mode (--expose)") ) -// validateNM is the set of fields passed to validateNetMode() -type validateNM struct { - netMode NetworkMode - flHostname *string - flLinks opts.ListOpts - flDNS opts.ListOpts - flExtraHosts opts.ListOpts - flMacAddress *string - flPublish opts.ListOpts - flPublishAll *bool - flExpose opts.ListOpts - flVolumeDriver string -} - // Parse parses the specified args for the specified command and generates a Config, // a HostConfig and returns them with the specified command. // If the specified args are not valid, it will return an error. @@ -52,7 +47,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe var ( // FIXME: use utils.ListOpts for attach and volumes? 
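The merge.go hunk above gives label merging a defined precedence: image labels form the base and user-supplied labels win on key collisions. Condensed into a standalone function (name hypothetical, semantics as in the hunk):

package main

import "fmt"

// mergeLabels applies the patch's semantics: start from the image's labels,
// overwrite with the user's, and fall back to the user map when the image
// has none.
func mergeLabels(user, image map[string]string) map[string]string {
	if user == nil {
		user = map[string]string{}
	}
	if image == nil {
		return user
	}
	for k, v := range user {
		image[k] = v
	}
	return image
}

func main() {
	img := map[string]string{"vendor": "Acme", "version": "1.0"}
	usr := map[string]string{"version": "2.0"}
	fmt.Println(mergeLabels(usr, img)) // map[vendor:Acme version:2.0]
}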
flAttach = opts.NewListOpts(opts.ValidateAttach) - flVolumes = opts.NewListOpts(opts.ValidatePath) + flVolumes = opts.NewListOpts(nil) flLinks = opts.NewListOpts(opts.ValidateLink) flEnv = opts.NewListOpts(opts.ValidateEnv) flLabels = opts.NewListOpts(opts.ValidateEnv) @@ -60,51 +55,54 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe flUlimits = opts.NewUlimitOpt(nil) - flPublish = opts.NewListOpts(nil) - flExpose = opts.NewListOpts(nil) - flDNS = opts.NewListOpts(opts.ValidateIPAddress) - flDNSSearch = opts.NewListOpts(opts.ValidateDNSSearch) - flExtraHosts = opts.NewListOpts(opts.ValidateExtraHost) - flVolumesFrom = opts.NewListOpts(nil) - flLxcOpts = opts.NewListOpts(nil) - flEnvFile = opts.NewListOpts(nil) - flCapAdd = opts.NewListOpts(nil) - flCapDrop = opts.NewListOpts(nil) - flGroupAdd = opts.NewListOpts(nil) - flSecurityOpt = opts.NewListOpts(nil) - flLabelsFile = opts.NewListOpts(nil) - flLoggingOpts = opts.NewListOpts(nil) - - flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") - flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") - flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use") - flUTSMode = cmd.String([]string{"-uts"}, "", "UTS namespace to use") - flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports") - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flOomKillDisable = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer") - flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") - flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") - flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") - flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit") - flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") - flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") - flCPUShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flCPUPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flCPUQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") - flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") - flBlkioWeight = cmd.Int64([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") - flSwappiness = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tuning container memory swappiness (0 to 100)") - flNetMode = cmd.String([]string{"-net"}, "default", "Set the Network mode for the container") - flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)") - flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") - flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") - flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") - flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") - flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") - flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") + flPublish = opts.NewListOpts(nil) + flExpose = opts.NewListOpts(nil) + flDNS = opts.NewListOpts(opts.ValidateIPAddress) + flDNSSearch = opts.NewListOpts(opts.ValidateDNSSearch) + flDNSOptions = opts.NewListOpts(nil) + flExtraHosts = opts.NewListOpts(opts.ValidateExtraHost) + flVolumesFrom = opts.NewListOpts(nil) + flEnvFile = opts.NewListOpts(nil) + flCapAdd = opts.NewListOpts(nil) + flCapDrop = opts.NewListOpts(nil) + flGroupAdd = opts.NewListOpts(nil) + flSecurityOpt = opts.NewListOpts(nil) + flLabelsFile = opts.NewListOpts(nil) + flLoggingOpts = opts.NewListOpts(nil) + flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") + flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") + flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use") + flUTSMode = cmd.String([]string{"-uts"}, "", "UTS namespace to use") + flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports") + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flOomKillDisable = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer") + flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") + flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") + flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") + flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemoryReservation = cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") + flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") + flKernelMemory = cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") + flCPUShares = cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCPUPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flCPUQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flBlkioWeight = cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") + flSwappiness = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tuning container memory swappiness (0 to 100)") + flNetMode = 
cmd.String([]string{"-net"}, "default", "Set the Network for the container") + flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") + flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") + flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") + flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") + flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") + flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container") + flStopSignal = cmd.String([]string{"-stop-signal"}, signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) + flIsolation = cmd.String([]string{"-isolation"}, "", "Container isolation level") ) cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR") @@ -119,9 +117,9 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port or a range of ports") cmd.Var(&flDNS, []string{"#dns", "-dns"}, "Set custom DNS servers") cmd.Var(&flDNSSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flDNSOptions, []string{"-dns-opt"}, "Set DNS options") cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options") cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join") @@ -129,8 +127,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options") - expFlags := attachExperimentalFlags(cmd) - cmd.Require(flag.Min, 1) if err := cmd.ParseFlags(args, true); err != nil { @@ -143,27 +139,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe attachStderr = flAttach.Get("stderr") ) - netMode, err := parseNetMode(*flNetMode) - if err != nil { - return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) - } - - vals := validateNM{ - netMode: netMode, - flHostname: flHostname, - flLinks: flLinks, - flDNS: flDNS, - flExtraHosts: flExtraHosts, - flMacAddress: flMacAddress, - flPublish: flPublish, - flPublishAll: flPublishAll, - flExpose: flExpose, - } - - if err := validateNetMode(&vals); err != nil { - return nil, nil, cmd, err - } - // Validate the input mac address if *flMacAddress != "" { if _, err := opts.ValidateMACAddress(*flMacAddress); err != nil { @@ -179,25 +154,41 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe attachStderr = true } + var err error + var flMemory int64 if *flMemoryString != "" { - parsedMemory, err := units.RAMInBytes(*flMemoryString) + flMemory, err = units.RAMInBytes(*flMemoryString) if err != nil { return nil, nil, cmd, err } - flMemory = parsedMemory } - var MemorySwap int64 + var MemoryReservation int64 + if 
*flMemoryReservation != "" { + MemoryReservation, err = units.RAMInBytes(*flMemoryReservation) + if err != nil { + return nil, nil, cmd, err + } + } + + var memorySwap int64 if *flMemorySwap != "" { if *flMemorySwap == "-1" { - MemorySwap = -1 + memorySwap = -1 } else { - parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) + memorySwap, err = units.RAMInBytes(*flMemorySwap) if err != nil { return nil, nil, cmd, err } - MemorySwap = parsedMemorySwap + } + } + + var KernelMemory int64 + if *flKernelMemory != "" { + KernelMemory, err = units.RAMInBytes(*flKernelMemory) + if err != nil { + return nil, nil, cmd, err } } @@ -209,38 +200,27 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe var binds []string // add any bind targets to the list of container volumes for bind := range flVolumes.GetMap() { - if arr := strings.Split(bind, ":"); len(arr) > 1 { - if arr[1] == "/" { - return nil, nil, cmd, fmt.Errorf("Invalid bind mount: destination can't be '/'") - } + if arr := volume.SplitN(bind, 2); len(arr) > 1 { // after creating the bind mount we want to delete it from the flVolumes values because // we do not want bind mounts being committed to image configs binds = append(binds, bind) flVolumes.Delete(bind) - } else if bind == "/" { - return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'") } } var ( parsedArgs = cmd.Args() - runCmd *Command - entrypoint *Entrypoint + runCmd *stringutils.StrSlice + entrypoint *stringutils.StrSlice image = cmd.Arg(0) ) if len(parsedArgs) > 1 { - runCmd = NewCommand(parsedArgs[1:]...) + runCmd = stringutils.NewStrSlice(parsedArgs[1:]...) } if *flEntrypoint != "" { - entrypoint = NewEntrypoint(*flEntrypoint) + entrypoint = stringutils.NewStrSlice(*flEntrypoint) } - lc, err := parseKeyValueOpts(flLxcOpts) - if err != nil { - return nil, nil, cmd, err - } - lxcConf := NewLxcConfig(lc) - var ( domainname string hostname = *flHostname @@ -345,50 +325,52 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe MacAddress: *flMacAddress, Entrypoint: entrypoint, WorkingDir: *flWorkingDir, - Labels: convertKVStringsToMap(labels), - VolumeDriver: *flVolumeDriver, + Labels: ConvertKVStringsToMap(labels), + StopSignal: *flStopSignal, } hostConfig := &HostConfig{ - Binds: binds, - ContainerIDFile: *flContainerIDFile, - LxcConf: lxcConf, - Memory: flMemory, - MemorySwap: MemorySwap, - CPUShares: *flCPUShares, - CPUPeriod: *flCPUPeriod, - CpusetCpus: *flCpusetCpus, - CpusetMems: *flCpusetMems, - CPUQuota: *flCPUQuota, - BlkioWeight: *flBlkioWeight, - OomKillDisable: *flOomKillDisable, - MemorySwappiness: flSwappiness, - Privileged: *flPrivileged, - PortBindings: portBindings, - Links: flLinks.GetAll(), - PublishAllPorts: *flPublishAll, - DNS: flDNS.GetAll(), - DNSSearch: flDNSSearch.GetAll(), - ExtraHosts: flExtraHosts.GetAll(), - VolumesFrom: flVolumesFrom.GetAll(), - NetworkMode: netMode, - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - Devices: deviceMappings, - CapAdd: NewCapList(flCapAdd.GetAll()), - CapDrop: NewCapList(flCapDrop.GetAll()), - GroupAdd: flGroupAdd.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: flSecurityOpt.GetAll(), - ReadonlyRootfs: *flReadonlyRootfs, - Ulimits: flUlimits.GetList(), - LogConfig: LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, - CgroupParent: *flCgroupParent, + Binds: binds, + ContainerIDFile: *flContainerIDFile, + Memory: flMemory, + MemoryReservation: MemoryReservation, + MemorySwap: memorySwap, + KernelMemory: KernelMemory, + 
CPUShares: *flCPUShares, + CPUPeriod: *flCPUPeriod, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CPUQuota: *flCPUQuota, + BlkioWeight: *flBlkioWeight, + OomKillDisable: *flOomKillDisable, + MemorySwappiness: flSwappiness, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + DNS: flDNS.GetAll(), + DNSSearch: flDNSSearch.GetAll(), + DNSOptions: flDNSOptions.GetAll(), + ExtraHosts: flExtraHosts.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + NetworkMode: NetworkMode(*flNetMode), + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + Devices: deviceMappings, + CapAdd: stringutils.NewStrSlice(flCapAdd.GetAll()...), + CapDrop: stringutils.NewStrSlice(flCapDrop.GetAll()...), + GroupAdd: flGroupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: flSecurityOpt.GetAll(), + ReadonlyRootfs: *flReadonlyRootfs, + Ulimits: flUlimits.GetList(), + LogConfig: LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, + CgroupParent: *flCgroupParent, + VolumeDriver: *flVolumeDriver, + Isolation: IsolationLevel(*flIsolation), } - applyExperimentalFlags(expFlags, config, hostConfig) - // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true @@ -412,8 +394,8 @@ func readKVStrings(files []string, override []string) ([]string, error) { return envVariables, nil } -// converts ["key=value"] to {"key":"value"} -func convertKVStringsToMap(values []string) map[string]string { +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { result := make(map[string]string, len(values)) for _, value := range values { kv := strings.SplitN(value, "=", 2) @@ -428,7 +410,7 @@ func convertKVStringsToMap(values []string) map[string]string { } func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { - loggingOptsMap := convertKVStringsToMap(loggingOpts) + loggingOptsMap := ConvertKVStringsToMap(loggingOpts) if loggingDriver == "none" && len(loggingOpts) > 0 { return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver) } @@ -450,9 +432,9 @@ func ParseRestartPolicy(policy string) (RestartPolicy, error) { p.Name = name switch name { - case "always": + case "always", "unless-stopped": if len(parts) > 1 { - return p, fmt.Errorf("maximum restart count not valid with restart policy of \"always\"") + return p, fmt.Errorf("maximum restart count not valid with restart policy of \"%s\"", name) } case "no": // do nothing @@ -498,7 +480,11 @@ func ParseDevice(device string) (DeviceMapping, error) { permissions = arr[2] fallthrough case 2: - dst = arr[1] + if opts.ValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + dst = arr[1] + } fallthrough case 1: src = arr[0] diff --git a/vendor/github.com/docker/docker/runconfig/parse_experimental.go b/vendor/github.com/docker/docker/runconfig/parse_experimental.go deleted file mode 100644 index 8f8612ba..00000000 --- a/vendor/github.com/docker/docker/runconfig/parse_experimental.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build experimental - -package runconfig - -import flag "github.com/docker/docker/pkg/mflag" - -type experimentalFlags struct { - flags map[string]interface{} -} - -func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags { - flags := make(map[string]interface{}) - flags["publish-service"] = cmd.String([]string{"-publish-service"}, "", "Publish this 
container as a service") - return &experimentalFlags{flags: flags} -} - -func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) { - config.PublishService = *(exp.flags["publish-service"]).(*string) -} diff --git a/vendor/github.com/docker/docker/runconfig/parse_stub.go b/vendor/github.com/docker/docker/runconfig/parse_stub.go deleted file mode 100644 index 391b6ed4..00000000 --- a/vendor/github.com/docker/docker/runconfig/parse_stub.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !experimental - -package runconfig - -import flag "github.com/docker/docker/pkg/mflag" - -type experimentalFlags struct{} - -func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags { - return nil -} - -func applyExperimentalFlags(flags *experimentalFlags, config *Config, hostConfig *HostConfig) { -} diff --git a/vendor/github.com/docker/docker/runconfig/parse_test.go b/vendor/github.com/docker/docker/runconfig/parse_test.go index 8916e7d4..d2406b09 100644 --- a/vendor/github.com/docker/docker/runconfig/parse_test.go +++ b/vendor/github.com/docker/docker/runconfig/parse_test.go @@ -1,14 +1,17 @@ package runconfig import ( + "bytes" + "encoding/json" "fmt" "io/ioutil" + "os" + "runtime" "strings" "testing" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/nat" - "github.com/docker/docker/pkg/parsers" ) func parseRun(args []string) (*Config, *HostConfig, *flag.FlagSet, error) { @@ -31,17 +34,6 @@ func mustParse(t *testing.T, args string) (*Config, *HostConfig) { return config, hostConfig } -// check if (a == c && b == d) || (a == d && b == c) -// because maps are randomized -func compareRandomizedStrings(a, b, c, d string) error { - if a == c && b == d { - return nil - } - if a == d && b == c { - return nil - } - return fmt.Errorf("strings don't match") -} func TestParseRunLinks(t *testing.T) { if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) @@ -98,181 +90,257 @@ func TestParseRunAttach(t *testing.T) { } func TestParseRunVolumes(t *testing.T) { - if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + + // A single volume + arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) } - if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/var"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. 
Received %v", config.Volumes) + // Two volumes + arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) } - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) + // A single bind-mount + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) } - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + // Two bind-mounts. + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) } - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + // Two bind-mounts, first read-only, second read-write. + // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. 
Received %v", arr[0], arr[1], hostConfig.Binds) } - if _, hostConfig := mustParse(t, "-v /containerTmp:ro -v /containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/containerTmp:ro", "/containerVar:rw") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + // Similar to previous test but with alternate modes which are only supported by Linux + if runtime.GOOS != "windows" { + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } } - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro,Z -v /hostVar:/containerVar:rw,Z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro,Z", "/hostVar:/containerVar:rw,Z") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro,Z -v /hostVar:/containerVar:rw,Z` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + // One bind mount and one volume + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) } - if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:Z", "/hostVar:/containerVar:z") != nil { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + // Root to non-c: drive letter (Windows specific) + if runtime.GOOS == "windows" { + arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { + t.Fatalf("Error parsing %s. 
Should have a single bind mount and no volumes", arr[0]) + } } - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) +} + +// This tests the cases for binds which are generated through +// DecodeContainerConfig rather than Parse() +func TestDecodeContainerConfigVolumes(t *testing.T) { + + // Root to root + bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) } - if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) - } else if len(config.Volumes) != 0 { - t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes) + // No destination path + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) } - if _, _, err := parse(t, "-v /"); err == nil { - t.Fatalf("Expected error, but got none") + // // No destination path or mode + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) } - if _, _, err := parse(t, "-v /:/"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") + // A whole lot of nothing + bindsOrVols = []string{`:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) } - if _, _, err := parse(t, "-v"); err == nil { - t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) } - if _, _, err := parse(t, "-v /tmp:"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") + + // A whole lot of nothing with no mode + bindsOrVols = []string{`::`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) } - if _, _, err := parse(t, "-v /tmp::"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) 
} - if _, _, err := parse(t, "-v :"); err == nil { - t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") + + // Too much including an invalid mode + wTmp := os.Getenv("TEMP") + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) } - if _, _, err := parse(t, "-v ::"); err == nil { - t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) } - if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") + + // Windows specific error tests + if runtime.GOOS == "windows" { + // Volume which does not include a drive letter + bindsOrVols = []string{`\tmp`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + + // Root to C-Drive + bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + + // Container path that does not include a drive letter + bindsOrVols = []string{`c:\windows:\somewhere`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + } + + // Linux-specific error tests + if runtime.GOOS != "windows" { + // Just root + bindsOrVols = []string{`/`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + + // A single volume that looks like a bind mount passed in Volumes. + // This should be handled as a bind mount, not a volume. + vols := []string{`/foo:/bar`} + if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { + t.Fatal("Volume /foo:/bar should have succeeded as a volume name") + } else if hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes[vols[0]]; !exists { + t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) + } + } } -func TestParseLxcConfOpt(t *testing.T) { - opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} - - for _, o := range opts { - k, v, err := parsers.ParseKeyValueOpt(o) - if err != nil { - t.FailNow() - } - if k != "lxc.utsname" { - t.Fail() - } - if v != "docker" { - t.Fail() - } +// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes +// to call DecodeContainerConfig. 
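For orientation, this is roughly the wire shape that helper builds before handing it to DecodeContainerConfig. The struct below is a simplified stand-in for runconfig's ContainerConfigWrapper (field set abbreviated, values hypothetical), just to make the JSON stream visible:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for ContainerConfigWrapper; the real types carry
// many more fields, but the embedding produces this top-level shape.
type wrapper struct {
	Volumes    map[string]struct{}
	HostConfig struct {
		NetworkMode string
		Binds       []string
	}
}

func main() {
	w := wrapper{Volumes: map[string]struct{}{"/foo": {}}}
	w.HostConfig.NetworkMode = "none"
	w.HostConfig.Binds = []string{"/host:/container"}
	b, _ := json.Marshal(w)
	fmt.Println(string(b))
	// {"Volumes":{"/foo":{}},"HostConfig":{"NetworkMode":"none","Binds":["/host:/container"]}}
}
```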
It effectively does what a client would +// do when calling the daemon by constructing a JSON stream of a +// ContainerConfigWrapper which is populated by the set of volume specs +// passed into it. It returns a config and a hostconfig which can be +// validated to ensure DecodeContainerConfig has manipulated the structures +// correctly. +func callDecodeContainerConfig(volumes []string, binds []string) (*Config, *HostConfig, error) { + var ( + b []byte + err error + c *Config + h *HostConfig + ) + w := ContainerConfigWrapper{ + Config: &Config{ + Volumes: map[string]struct{}{}, + }, + HostConfig: &HostConfig{ + NetworkMode: "none", + Binds: binds, + }, } - - // With parseRun too - _, hostconfig, _, err := parseRun([]string{"lxc.utsname=docker", "lxc.utsname = docker ", "img", "cmd"}) + for _, v := range volumes { + w.Config.Volumes[v] = struct{}{} + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + c, h, err = DecodeContainerConfig(bytes.NewReader(b)) if err != nil { - t.Fatal(err) + return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) } - for _, lxcConf := range hostconfig.LxcConf.Slice() { - if lxcConf.Key != "lxc.utsname" || lxcConf.Value != "docker" { - t.Fail() - } + if c == nil || h == nil { + return nil, nil, fmt.Errorf("Empty config or hostconfig") } + return c, h, err } -func TestNetHostname(t *testing.T) { - if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}); err != nil { - t.Fatalf("Unexpected error: %s", err) +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil } - - if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}); err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}); err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}); err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}); err != ErrConflictNetworkHostname { - t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) - } - - if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}); err != ErrConflictNetworkHostname { - t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) - } - if _, _, _, err := parseRun([]string{"--net=container", "img", "cmd"}); err == nil || err.Error() != "--net: invalid net mode: invalid container format container:" { - t.Fatalf("Expected error with --net=container, got : %v", err) - } - if _, _, _, err := parseRun([]string{"--net=weird", "img", "cmd"}); err == nil || err.Error() != "--net: invalid net mode: invalid --net: weird" { - t.Fatalf("Expected error with --net=weird, got: %s", err) + if a == d && b == c { + return nil } + return fmt.Errorf("strings don't match") } -func TestConflictContainerNetworkAndLinks(t *testing.T) { - if _, _, _, err := parseRun([]string{"--net=container:other", "--link=zip:zap", "img", "cmd"}); err != ErrConflictContainerNetworkAndLinks { - t.Fatalf("Expected error ErrConflictContainerNetworkAndLinks, got: %s", err) +// setupPlatformVolume takes two arrays of volume specs - a Unix style +// spec and a Windows style spec. 
Depending on the platform being unit tested, +// it returns one of them, along with a volume string that would be passed +// on the docker CLI (eg -v /bar -v /foo). +func setupPlatformVolume(u []string, w []string) ([]string, string) { + var a []string + if runtime.GOOS == "windows" { + a = w + } else { + a = u } - if _, _, _, err := parseRun([]string{"--net=host", "--link=zip:zap", "img", "cmd"}); err != ErrConflictHostNetworkAndLinks { - t.Fatalf("Expected error ErrConflictHostNetworkAndLinks, got: %s", err) - } -} - -func TestConflictNetworkModeAndOptions(t *testing.T) { - if _, _, _, err := parseRun([]string{"--net=host", "--dns=8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkAndDNS { - t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=container:other", "--dns=8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkAndDNS { - t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=host", "--add-host=name:8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkHosts { - t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=container:other", "--add-host=name:8.8.8.8", "img", "cmd"}); err != ErrConflictNetworkHosts { - t.Fatalf("Expected error ErrConflictNetworkAndDns, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=host", "--mac-address=92:d0:c6:0a:29:33", "img", "cmd"}); err != ErrConflictContainerNetworkAndMac { - t.Fatalf("Expected error ErrConflictContainerNetworkAndMac, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "img", "cmd"}); err != ErrConflictContainerNetworkAndMac { - t.Fatalf("Expected error ErrConflictContainerNetworkAndMac, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=container:other", "-P", "img", "cmd"}); err != ErrConflictNetworkPublishPorts { - t.Fatalf("Expected error ErrConflictNetworkPublishPorts, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=container:other", "-p", "8080", "img", "cmd"}); err != ErrConflictNetworkPublishPorts { - t.Fatalf("Expected error ErrConflictNetworkPublishPorts, got %s", err) - } - if _, _, _, err := parseRun([]string{"--net=container:other", "--expose", "8000-9000", "img", "cmd"}); err != ErrConflictNetworkExposePorts { - t.Fatalf("Expected error ErrConflictNetworkExposePorts, got %s", err) + s := "" + for _, v := range a { + s = s + "-v " + v + " " } + return a, s } // Simple parse with MacAddress validatation @@ -388,15 +456,20 @@ func TestParseDevice(t *testing.T) { PathInContainer: "/dev/snd", CgroupPermissions: "rwm", }, + "/dev/snd:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rw", + }, "/dev/snd:/something": { PathOnHost: "/dev/snd", PathInContainer: "/something", CgroupPermissions: "rwm", }, - "/dev/snd:/something:ro": { + "/dev/snd:/something:rw": { PathOnHost: "/dev/snd", PathInContainer: "/something", - CgroupPermissions: "ro", + CgroupPermissions: "rw", }, } for device, deviceMapping := range valids { @@ -504,9 +577,13 @@ func TestParseLoggingOpts(t *testing.T) { } func TestParseEnvfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." 
+ } // env ko - if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != "open nonexistent: no such file or directory" { - t.Fatalf("Expected an error with message 'open nonexistent: no such file or directory', got %v", err) + if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) } // env ok config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) @@ -526,9 +603,13 @@ func TestParseEnvfileVariables(t *testing.T) { } func TestParseLabelfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } // label ko - if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != "open nonexistent: no such file or directory" { - t.Fatalf("Expected an error with message 'open nonexistent: no such file or directory', got %v", err) + if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) } // label ok config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) @@ -552,7 +633,7 @@ func TestParseEntryPoint(t *testing.T) { if err != nil { t.Fatal(err) } - if config.Entrypoint.Len() != 1 && config.Entrypoint.parts[0] != "anything" { + if config.Entrypoint.Len() != 1 && config.Entrypoint.Slice()[0] != "anything" { t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) } } diff --git a/vendor/github.com/docker/docker/runconfig/parse_unix.go b/vendor/github.com/docker/docker/runconfig/parse_unix.go deleted file mode 100644 index 7086b1ad..00000000 --- a/vendor/github.com/docker/docker/runconfig/parse_unix.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build !windows - -package runconfig - -import ( - "fmt" - "strings" -) - -func parseNetMode(netMode string) (NetworkMode, error) { - parts := strings.Split(netMode, ":") - switch mode := parts[0]; mode { - case "default", "bridge", "none", "host": - case "container": - if len(parts) < 2 || parts[1] == "" { - return "", fmt.Errorf("invalid container format container:") - } - default: - return "", fmt.Errorf("invalid --net: %s", netMode) - } - return NetworkMode(netMode), nil -} - -func validateNetMode(vals *validateNM) error { - - if (vals.netMode.IsHost() || vals.netMode.IsContainer()) && *vals.flHostname != "" { - return ErrConflictNetworkHostname - } - - if vals.netMode.IsHost() && vals.flLinks.Len() > 0 { - return ErrConflictHostNetworkAndLinks - } - - if vals.netMode.IsContainer() && vals.flLinks.Len() > 0 { - return ErrConflictContainerNetworkAndLinks - } - - if (vals.netMode.IsHost() || vals.netMode.IsContainer()) && vals.flDNS.Len() > 0 { - return ErrConflictNetworkAndDNS - } - - if (vals.netMode.IsContainer() || vals.netMode.IsHost()) && vals.flExtraHosts.Len() > 0 { - return ErrConflictNetworkHosts - } - - if (vals.netMode.IsContainer() || vals.netMode.IsHost()) && *vals.flMacAddress != "" { - return ErrConflictContainerNetworkAndMac - } - - if vals.netMode.IsContainer() && (vals.flPublish.Len() > 0 || *vals.flPublishAll == true) { - return ErrConflictNetworkPublishPorts - } - - if vals.netMode.IsContainer() && vals.flExpose.Len() > 0 { - return ErrConflictNetworkExposePorts - } - return nil -} diff --git 
a/vendor/github.com/docker/docker/runconfig/parse_windows.go b/vendor/github.com/docker/docker/runconfig/parse_windows.go deleted file mode 100644 index ca0a2e6d..00000000 --- a/vendor/github.com/docker/docker/runconfig/parse_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package runconfig - -import ( - "fmt" - "strings" -) - -func parseNetMode(netMode string) (NetworkMode, error) { - parts := strings.Split(netMode, ":") - switch mode := parts[0]; mode { - case "default", "none": - default: - return "", fmt.Errorf("invalid --net: %s", netMode) - } - return NetworkMode(netMode), nil -} - -func validateNetMode(vals *validateNM) error { - return nil -} diff --git a/vendor/github.com/docker/docker/utils/names.go b/vendor/github.com/docker/docker/utils/names.go new file mode 100644 index 00000000..e09e569b --- /dev/null +++ b/vendor/github.com/docker/docker/utils/names.go @@ -0,0 +1,9 @@ +package utils + +import "regexp" + +// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. +const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. +var RestrictedNamePattern = regexp.MustCompile(`^/?` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/docker/docker/utils/timeout.go b/vendor/github.com/docker/docker/utils/timeout.go new file mode 100644 index 00000000..85d2665c --- /dev/null +++ b/vendor/github.com/docker/docker/utils/timeout.go @@ -0,0 +1,21 @@ +package utils + +import ( + "net" + "net/url" +) + +// IsTimeout takes an error returned from (generally) the http package and determines if it is a timeout error. +func IsTimeout(err error) bool { + switch e := err.(type) { + case net.Error: + return e.Timeout() + case *url.Error: + if t, ok := e.Err.(net.Error); ok { + return t.Timeout() + } + return false + default: + return false + } +} diff --git a/vendor/github.com/docker/docker/utils/utils.go b/vendor/github.com/docker/docker/utils/utils.go index 8c98d472..d2e83f61 100644 --- a/vendor/github.com/docker/docker/utils/utils.go +++ b/vendor/github.com/docker/docker/utils/utils.go @@ -13,7 +13,8 @@ import ( "runtime" "strings" - "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/stringid" @@ -59,7 +60,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and if target == "" { return false } - if dockerversion.IAMSTATIC == "true" { + if dockerversion.IAmStatic == "true" { if selfPath == "" { return false } @@ -76,7 +77,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and } return os.SameFile(targetFileInfo, selfPathFileInfo) } - return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 + return dockerversion.InitSHA1 != "" && dockerInitSha1(target) == dockerversion.InitSHA1 } // DockerInitPath figures out the path of our dockerinit (which may be SelfPath()) @@ -88,7 +89,7 @@ func DockerInitPath(localCopy string) string { } var possibleInits = []string{ localCopy, - dockerversion.INITPATH, + dockerversion.InitPath, filepath.Join(filepath.Dir(selfPath), "dockerinit"), // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. 
Applications may use a single subdirectory under /usr/libexec." @@ -199,9 +200,13 @@ func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { // can be read and returns an error if some files can't be read // symlinks which point to non-existing files don't trigger an error func ValidateContextDirectory(srcPath string, excludes []string) error { - return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { return err } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { return err @@ -242,17 +247,11 @@ func ValidateContextDirectory(srcPath string, excludes []string) error { // ReadDockerIgnore reads a .dockerignore file and returns the list of file patterns // to ignore. Note this will trim whitespace from each line as well // as use GO's "clean" func to get the shortest/cleanest path for each. -func ReadDockerIgnore(path string) ([]string, error) { - // Note that a missing .dockerignore file isn't treated as an error - reader, err := os.Open(path) - if err != nil { - if !os.IsNotExist(err) { - return nil, fmt.Errorf("Error reading '%s': %v", path, err) - } +func ReadDockerIgnore(reader io.ReadCloser) ([]string, error) { + if reader == nil { return nil, nil } defer reader.Close() - scanner := bufio.NewScanner(reader) var excludes []string @@ -264,8 +263,8 @@ func ReadDockerIgnore(path string) ([]string, error) { pattern = filepath.Clean(pattern) excludes = append(excludes, pattern) } - if err = scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading '%s': %v", path, err) + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) } return excludes, nil } @@ -286,3 +285,22 @@ func ImageReference(repo, ref string) string { func DigestReference(ref string) bool { return strings.Contains(ref, ":") } + +// GetErrorMessage returns the human readable message associated with +// the passed-in error. In some cases the default Error() func returns +// something that is less than useful so based on its types this func +// will go and get a better piece of text. 
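Stepping back to the ReadDockerIgnore change above: the function no longer opens the path or special-cases a missing file, so that responsibility moves to the caller, and a missing .dockerignore is expressed as a nil reader. A minimal caller sketch (the helper name is hypothetical; note the explicit io.ReadCloser variable, since assigning a nil *os.File directly would produce a non-nil interface and defeat the nil check):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/docker/docker/utils"
)

func readIgnorePatterns(contextDir string) ([]string, error) {
	var rc io.ReadCloser // stays nil when the file is absent
	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	switch {
	case err == nil:
		rc = f // ReadDockerIgnore closes the reader itself
	case !os.IsNotExist(err):
		return nil, err
	}
	return utils.ReadDockerIgnore(rc) // nil reader => no patterns, no error
}

func main() {
	patterns, err := readIgnorePatterns(".")
	fmt.Println(patterns, err)
}
```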
+func GetErrorMessage(err error) string { + switch err.(type) { + case errcode.Error: + e, _ := err.(errcode.Error) + return e.Message + + case errcode.ErrorCode: + ec, _ := err.(errcode.ErrorCode) + return ec.Message() + + default: + return err.Error() + } +} diff --git a/vendor/github.com/docker/docker/utils/utils_test.go b/vendor/github.com/docker/docker/utils/utils_test.go index 28630094..9acb8017 100644 --- a/vendor/github.com/docker/docker/utils/utils_test.go +++ b/vendor/github.com/docker/docker/utils/utils_test.go @@ -63,24 +63,27 @@ func TestReadDockerIgnore(t *testing.T) { } defer os.RemoveAll(tmpDir) - diName := filepath.Join(tmpDir, ".dockerignore") - - di, err := ReadDockerIgnore(diName) + di, err := ReadDockerIgnore(nil) if err != nil { - t.Fatalf("Expected not to have error, got %s", err) + t.Fatalf("Expected not to have error, got %v", err) } if diLen := len(di); diLen != 0 { t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) } + diName := filepath.Join(tmpDir, ".dockerignore") content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") err = ioutil.WriteFile(diName, []byte(content), 0777) if err != nil { t.Fatal(err) } - di, err = ReadDockerIgnore(diName) + diFd, err := os.Open(diName) + if err != nil { + t.Fatal(err) + } + di, err = ReadDockerIgnore(diFd) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/utils/utils_unix.go b/vendor/github.com/docker/docker/utils/utils_unix.go new file mode 100644 index 00000000..86bfb770 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/utils_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package utils + +import ( + "path/filepath" +) + +func getContextRoot(srcPath string) (string, error) { + return filepath.Join(srcPath, "."), nil +} diff --git a/vendor/github.com/docker/docker/utils/utils_windows.go b/vendor/github.com/docker/docker/utils/utils_windows.go new file mode 100644 index 00000000..80b58bd9 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/utils_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package utils + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/longpath" +) + +func getContextRoot(srcPath string) (string, error) { + cr, err := filepath.Abs(srcPath) + if err != nil { + return "", err + } + return longpath.AddPrefix(cr), nil +} diff --git a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go index 6846a3a8..a5db6991 100644 --- a/vendor/github.com/docker/docker/volume/drivers/adapter.go +++ b/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -11,8 +11,8 @@ func (a *volumeDriverAdapter) Name() string { return a.name } -func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) { - err := a.proxy.Create(name) +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + err := a.proxy.Create(name, opts) if err != nil { return nil, err } @@ -33,6 +33,11 @@ type volumeAdapter struct { eMount string // ephemeral host volume path } +type proxyVolume struct { + Name string + Mountpoint string +} + func (a *volumeAdapter) Name() string { return a.name } diff --git a/vendor/github.com/docker/docker/volume/drivers/api.go b/vendor/github.com/docker/docker/volume/drivers/api.go deleted file mode 100644 index ced82e08..00000000 --- a/vendor/github.com/docker/docker/volume/drivers/api.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type VolumeDriver -name VolumeDriver - -package 
volumedrivers - -import "github.com/docker/docker/volume" - -// NewVolumeDriver returns a driver has the given name mapped on the given client. -func NewVolumeDriver(name string, c client) volume.Driver { - proxy := &volumeDriverProxy{c} - return &volumeDriverAdapter{name, proxy} -} - -// VolumeDriver defines the available functions that volume plugins must implement. -type VolumeDriver interface { - // Create a volume with the given name - Create(name string) (err error) - // Remove the volume with the given name - Remove(name string) (err error) - // Get the mountpoint of the given volume - Path(name string) (mountpoint string, err error) - // Mount the given volume and return the mountpoint - Mount(name string) (mountpoint string, err error) - // Unmount the given volume - Unmount(name string) (err error) -} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go index 09bb7d43..3927b29c 100644 --- a/vendor/github.com/docker/docker/volume/drivers/extpoint.go +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint.go @@ -1,3 +1,5 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + package volumedrivers import ( @@ -13,6 +15,30 @@ import ( var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)} +// NewVolumeDriver returns a driver has the given name mapped on the given client. +func NewVolumeDriver(name string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name, proxy} +} + +type opts map[string]string + +// volumeDriver defines the available functions that volume plugins must implement. +// This interface is only defined to generate the proxy objects. +// It's not intended to be public or reused. +type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts opts) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name string) (err error) +} + type driverExtpoint struct { extensions map[string]volume.Driver sync.Mutex @@ -51,8 +77,8 @@ func Unregister(name string) bool { // there is a VolumeDriver plugin available with the given name. func Lookup(name string) (volume.Driver, error) { drivers.Lock() - defer drivers.Unlock() ext, ok := drivers.extensions[name] + drivers.Unlock() if ok { return ext, nil } @@ -61,7 +87,22 @@ func Lookup(name string) (volume.Driver, error) { return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) } + drivers.Lock() + defer drivers.Unlock() + if ext, ok := drivers.extensions[name]; ok { + return ext, nil + } + d := NewVolumeDriver(name, pl.Client) drivers.extensions[name] = d return d, nil } + +// GetDriver returns a volume driver by it's name. +// If the driver is empty, it looks for the local driver. 
+func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return Lookup(name) +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go new file mode 100644 index 00000000..8ab60c95 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go @@ -0,0 +1,22 @@ +package volumedrivers + +import ( + "testing" + + "github.com/docker/docker/volume/testutils" +) + +func TestGetDriver(t *testing.T) { + _, err := GetDriver("missing") + if err == nil { + t.Fatal("Expected error, was nil") + } + Register(volumetestutils.FakeDriver{}, "fake") + d, err := GetDriver("fake") + if err != nil { + t.Fatal(err) + } + if d.Name() != "fake" { + t.Fatalf("Expected fake driver, got %s\n", d.Name()) + } +} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go index 9fd68855..f2e2f044 100644 --- a/vendor/github.com/docker/docker/volume/drivers/proxy.go +++ b/vendor/github.com/docker/docker/volume/drivers/proxy.go @@ -14,19 +14,21 @@ type volumeDriverProxy struct { type volumeDriverProxyCreateRequest struct { Name string + Opts opts } type volumeDriverProxyCreateResponse struct { Err string } -func (pp *volumeDriverProxy) Create(name string) (err error) { +func (pp *volumeDriverProxy) Create(name string, opts opts) (err error) { var ( req volumeDriverProxyCreateRequest ret volumeDriverProxyCreateResponse ) req.Name = name + req.Opts = opts if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { return } } diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy_test.go b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go index cadf8c0d..aa1d67e3 100644 --- a/vendor/github.com/docker/docker/volume/drivers/proxy_test.go +++ b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go @@ -50,7 +50,7 @@ func TestVolumeRequestError(t *testing.T) { driver := volumeDriverProxy{client} - if err = driver.Create("volume"); err == nil { + if err = driver.Create("volume", nil); err == nil { t.Fatal("Expected error, was nil") } diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go index 0507f07e..e0c3a644 100644 --- a/vendor/github.com/docker/docker/volume/local/local.go +++ b/vendor/github.com/docker/docker/volume/local/local.go @@ -9,9 +9,11 @@ import ( "io/ioutil" "os" "path/filepath" - "strings" "sync" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/utils" "github.com/docker/docker/volume" ) @@ -23,15 +25,22 @@ const ( volumesPathName = "volumes" ) -var oldVfsDir = filepath.Join("vfs", "dir") +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = errors.New("volume not found") + // volumeNameRegex ensures the name assigned to the volume is valid. + // This name is used to create the bind directory, so we need to avoid characters that + // would allow the path to escape the root directory. + volumeNameRegex = utils.RestrictedNamePattern ) // New instantiates a new Root instance with the provided scope. Scope // is the base path that the Root instance uses to store its // volumes. The base path is created here if it does not exist. 
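With Opts threaded through the proxy above, every VolumeDriver.Create RPC now carries an options map for the plugin to interpret. A standalone sketch of the resulting request body; the struct mirrors volumeDriverProxyCreateRequest, and the option key is invented, since drivers interpret options freely:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors volumeDriverProxyCreateRequest: what a volume plugin now
// receives for VolumeDriver.Create.
type createRequest struct {
	Name string
	Opts map[string]string
}

func main() {
	b, _ := json.Marshal(createRequest{
		Name: "mydata",
		Opts: map[string]string{"size": "10G"}, // hypothetical driver option
	})
	fmt.Println(string(b)) // {"Name":"mydata","Opts":{"size":"10G"}}
}
```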
-func New(scope string) (*Root, error) { +func New(scope string, rootUID, rootGID int) (*Root, error) { rootDirectory := filepath.Join(scope, volumesPathName) - if err := os.MkdirAll(rootDirectory, 0700); err != nil { + if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { return nil, err } @@ -39,6 +48,8 @@ func New(scope string) (*Root, error) { scope: scope, path: rootDirectory, volumes: make(map[string]*localVolume), + rootUID: rootUID, + rootGID: rootGID, } dirs, err := ioutil.ReadDir(rootDirectory) @@ -54,6 +65,7 @@ func New(scope string) (*Root, error) { path: r.DataPath(name), } } + return r, nil } @@ -65,6 +77,17 @@ type Root struct { scope string path string volumes map[string]*localVolume + rootUID int + rootGID int +} + +// List lists all the volumes +func (r *Root) List() []volume.Volume { + var ls []volume.Volume + for _, v := range r.volumes { + ls = append(ls, v) + } + return ls } // DataPath returns the constructed path of this volume. @@ -80,27 +103,32 @@ func (r *Root) Name() string { // Create creates a new volume.Volume with the provided name, creating // the underlying directory tree required for this volume in the // process. -func (r *Root) Create(name string) (volume.Volume, error) { +func (r *Root) Create(name string, _ map[string]string) (volume.Volume, error) { + if err := r.validateName(name); err != nil { + return nil, err + } + r.m.Lock() defer r.m.Unlock() v, exists := r.volumes[name] - if !exists { - path := r.DataPath(name) - if err := os.MkdirAll(path, 0755); err != nil { - if os.IsExist(err) { - return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) - } - return nil, err - } - v = &localVolume{ - driverName: r.Name(), - name: name, - path: path, - } - r.volumes[name] = v + if exists { + return v, nil } - v.use() + + path := r.DataPath(name) + if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, err + } + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + r.volumes[name] = v return v, nil } @@ -111,44 +139,58 @@ func (r *Root) Create(name string) (volume.Volume, error) { func (r *Root) Remove(v volume.Volume) error { r.m.Lock() defer r.m.Unlock() + lv, ok := v.(*localVolume) if !ok { return errors.New("unknown volume type") } - lv.release() - if lv.usedCount == 0 { - realPath, err := filepath.EvalSymlinks(lv.path) - if err != nil { + + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + if !os.IsNotExist(err) { return err } - if !r.scopedPath(realPath) { - return fmt.Errorf("Unable to remove a directory of out the Docker root: %s", realPath) - } + realPath = filepath.Dir(lv.path) + } - if err := os.RemoveAll(realPath); err != nil { - return err - } + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory out of the Docker root %s: %s", r.scope, realPath) + } - delete(r.volumes, lv.name) - return os.RemoveAll(filepath.Dir(lv.path)) + if err := removePath(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return removePath(filepath.Dir(lv.path)) +} + +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if os.IsNotExist(err) { + return nil + } + return err } return nil } -// scopedPath verifies that the path where the volume is located -// is under Docker's root and the valid local paths. 
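The validateName check introduced below leans on utils.RestrictedNamePattern. Expanding the pattern inline shows what the reworked local driver accepts; one subtlety worth noting is that the `+` applies to the second character class, so single-character names are rejected:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Expanded form of utils.RestrictedNamePattern: an optional leading
	// slash, one alphanumeric, then one or more word chars, dots or dashes.
	re := regexp.MustCompile(`^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)
	for _, name := range []string{"name", "name_with_underscore", "name/with/slash", "../name", ".", "a"} {
		fmt.Printf("%-22s %v\n", name, re.MatchString(name))
	}
	// name true, name_with_underscore true, name/with/slash false,
	// ../name false, . false, a false
}
```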
-func (r *Root) scopedPath(realPath string) bool { - // Volumes path for Docker version >= 1.7 - if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) { - return true +// Get looks up the volume for the given name and returns it if found +func (r *Root) Get(name string) (volume.Volume, error) { + r.m.Lock() + v, exists := r.volumes[name] + r.m.Unlock() + if !exists { + return nil, ErrNotFound } + return v, nil +} - // Volumes path for Docker version < 1.7 - if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { - return true +func (r *Root) validateName(name string) error { + if !volumeNameRegex.MatchString(name) { + return derr.ErrorCodeVolumeName.WithArgs(name, utils.RestrictedNameChars) } - - return false + return nil } // localVolume implements the Volume interface from the volume package and @@ -188,15 +230,3 @@ func (v *localVolume) Mount() (string, error) { func (v *localVolume) Unmount() error { return nil } - -func (v *localVolume) use() { - v.m.Lock() - v.usedCount++ - v.m.Unlock() -} - -func (v *localVolume) release() { - v.m.Lock() - v.usedCount-- - v.m.Unlock() -} diff --git a/vendor/github.com/docker/docker/volume/local/local_test.go b/vendor/github.com/docker/docker/volume/local/local_test.go new file mode 100644 index 00000000..2c5b800a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_test.go @@ -0,0 +1,126 @@ +package local + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestRemove(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + vol, err = r.Create("testing2", nil) + if err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(vol.Path()); err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(vol.Path()); err != nil && !os.IsNotExist(err) { + t.Fatal("volume dir not removed") + } + + if len(r.List()) != 0 { + t.Fatal("expected there to be no volumes") + } +} + +func TestInitializeWithVolumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + v, err := r.Get(vol.Name()) + if err != nil { + t.Fatal(err) + } + + if v.Path() != vol.Path() { + t.Fatal("expected to re-initialize root with existing volumes") + } +} + +func TestCreate(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + cases := map[string]bool{ + "name": true, + "name-with-dash": true, + "name_with_underscore": true, + "name/with/slash": false, + "name/with/../../slash": false, + "./name": false, + "../name": false, + "./": false, + "../": false, + "~": false, + ".": false, + "..": false, + "...": false, + } + + for name, success := range cases { + v, err := r.Create(name, nil) + if success { + if err != nil { + t.Fatal(err) + } + if v.Name() != name { + t.Fatalf("Expected volume with name %s, got %s", name, v.Name()) + } + } 
else { + if err == nil { + t.Fatalf("Expected error creating volume with name %s, got nil", name) + } + } + } +} diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go new file mode 100644 index 00000000..60f0e765 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_unix.go @@ -0,0 +1,29 @@ +// +build linux freebsd + +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "path/filepath" + "strings" +) + +var oldVfsDir = filepath.Join("vfs", "dir") + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} diff --git a/vendor/github.com/docker/docker/volume/local/local_windows.go b/vendor/github.com/docker/docker/volume/local/local_windows.go new file mode 100644 index 00000000..38812aa2 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_windows.go @@ -0,0 +1,18 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "path/filepath" + "strings" +) + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/store/store.go new file mode 100644 index 00000000..73ae79fb --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store.go @@ -0,0 +1,228 @@ +package store + +import ( + "errors" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +var ( + // ErrVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + ErrVolumeInUse = errors.New("volume is in use") + // ErrNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store + ErrNoSuchVolume = errors.New("no such volume") + // ErrInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + ErrInvalidName = errors.New("volume name is not valid on this platform") +) + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. 
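The store that follows relies on two levels of locking, visible in every method below: a per-name lock held for the whole logical operation on one volume, and a global mutex held only briefly around map access and never across driver calls. A distilled, self-contained model of that discipline (stand-in types, simplified to a refcount map; the real code uses pkg/locker for the per-name locks):

```go
package main

import (
	"fmt"
	"sync"
)

// nameLocker hands out one mutex per name, like pkg/locker does.
type nameLocker struct {
	mu sync.Mutex
	m  map[string]*sync.Mutex
}

func (l *nameLocker) get(name string) *sync.Mutex {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.m == nil {
		l.m = map[string]*sync.Mutex{}
	}
	if _, ok := l.m[name]; !ok {
		l.m[name] = &sync.Mutex{}
	}
	return l.m[name]
}

type store struct {
	locks      nameLocker
	globalLock sync.Mutex
	vols       map[string]int // name -> refcount, simplified
}

func (s *store) increment(name string) {
	nl := s.locks.get(name)
	nl.Lock() // serializes all operations touching this one name
	defer nl.Unlock()

	s.globalLock.Lock() // guards only the map itself
	if s.vols == nil {
		s.vols = map[string]int{}
	}
	s.vols[name]++
	s.globalLock.Unlock()
}

func main() {
	s := &store{}
	s.increment("data")
	fmt.Println(s.vols["data"]) // 1
}
```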
+func New() *VolumeStore { + return &VolumeStore{ + vols: make(map[string]*volumeCounter), + locks: &locker.Locker{}, + } +} + +func (s *VolumeStore) get(name string) (*volumeCounter, bool) { + s.globalLock.Lock() + vc, exists := s.vols[name] + s.globalLock.Unlock() + return vc, exists +} + +func (s *VolumeStore) set(name string, vc *volumeCounter) { + s.globalLock.Lock() + s.vols[name] = vc + s.globalLock.Unlock() +} + +func (s *VolumeStore) remove(name string) { + s.globalLock.Lock() + delete(s.vols, name) + s.globalLock.Unlock() +} + +// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts +type VolumeStore struct { + vols map[string]*volumeCounter + locks *locker.Locker + globalLock sync.Mutex +} + +// volumeCounter keeps track of references to a volume +type volumeCounter struct { + volume.Volume + count uint +} + +// AddAll adds a list of volumes to the store +func (s *VolumeStore) AddAll(vols []volume.Volume) { + for _, v := range vols { + s.vols[normaliseVolumeName(v.Name())] = &volumeCounter{v, 0} + } +} + +// Create tries to find an existing volume with the given name or create a new one from the passed in driver +func (s *VolumeStore) Create(name, driverName string, opts map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + if vc, exists := s.get(name); exists { + v := vc.Volume + return v, nil + } + logrus.Debugf("Registering new volume reference: driver %s, name %s", driverName, name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, err + } + + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, ErrInvalidName + } + + v, err := vd.Create(name, opts) + if err != nil { + return nil, err + } + + s.set(name, &volumeCounter{v, 0}) + return v, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vc, exists := s.get(name) + if !exists { + return nil, ErrNoSuchVolume + } + return vc.Volume, nil +} + +// Remove removes the requested volume. 
A volume is not removed if the usage count is > 0 +func (s *VolumeStore) Remove(v volume.Volume) error { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + vc, exists := s.get(name) + if !exists { + return ErrNoSuchVolume + } + + if vc.count > 0 { + return ErrVolumeInUse + } + + vd, err := volumedrivers.GetDriver(vc.DriverName()) + if err != nil { + return err + } + if err := vd.Remove(vc.Volume); err != nil { + return err + } + + s.remove(name) + return nil +} + +// Increment increments the usage count of the passed in volume by 1 +func (s *VolumeStore) Increment(v volume.Volume) { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + logrus.Debugf("Incrementing volume reference: driver %s, name %s", v.DriverName(), v.Name()) + vc, exists := s.get(name) + if !exists { + s.set(name, &volumeCounter{v, 1}) + return + } + vc.count++ +} + +// Decrement decrements the usage count of the passed in volume by 1 +func (s *VolumeStore) Decrement(v volume.Volume) { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + logrus.Debugf("Decrementing volume reference: driver %s, name %s", v.DriverName(), v.Name()) + + vc, exists := s.get(name) + if !exists { + return + } + if vc.count == 0 { + return + } + vc.count-- +} + +// Count returns the usage count of the passed in volume +func (s *VolumeStore) Count(v volume.Volume) uint { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vc, exists := s.get(name) + if !exists { + return 0 + } + return vc.count +} + +// List returns all the available volumes +func (s *VolumeStore) List() []volume.Volume { + s.globalLock.Lock() + defer s.globalLock.Unlock() + var ls []volume.Volume + for _, vc := range s.vols { + ls = append(ls, vc.Volume) + } + return ls +} + +// FilterByDriver returns the available volumes filtered by driver name +func (s *VolumeStore) FilterByDriver(name string) []volume.Volume { + return s.filter(byDriver(name)) +} + +// filterFunc defines a function to allow filter volumes in the store +type filterFunc func(vol volume.Volume) bool + +// byDriver generates a filterFunc to filter volumes by their driver name +func byDriver(name string) filterFunc { + return func(vol volume.Volume) bool { + return vol.DriverName() == name + } +} + +// filter returns the available volumes filtered by a filterFunc function +func (s *VolumeStore) filter(f filterFunc) []volume.Volume { + s.globalLock.Lock() + defer s.globalLock.Unlock() + var ls []volume.Volume + for _, vc := range s.vols { + if f(vc.Volume) { + ls = append(ls, vc.Volume) + } + } + return ls +} diff --git a/vendor/github.com/docker/docker/volume/store/store_test.go b/vendor/github.com/docker/docker/volume/store/store_test.go new file mode 100644 index 00000000..0b38ce62 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_test.go @@ -0,0 +1,152 @@ +package store + +import ( + "testing" + + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + vt "github.com/docker/docker/volume/testutils" +) + +func TestList(t *testing.T) { + volumedrivers.Register(vt.FakeDriver{}, "fake") + s := New() + s.AddAll([]volume.Volume{vt.NewFakeVolume("fake1"), vt.NewFakeVolume("fake2")}) + l := s.List() + if len(l) != 2 { + t.Fatalf("Expected 2 volumes in the store, got %v: %v", len(l), l) + } +} + +func TestGet(t *testing.T) { + 
volumedrivers.Register(vt.FakeDriver{}, "fake") + s := New() + s.AddAll([]volume.Volume{vt.NewFakeVolume("fake1"), vt.NewFakeVolume("fake2")}) + v, err := s.Get("fake1") + if err != nil { + t.Fatal(err) + } + if v.Name() != "fake1" { + t.Fatalf("Expected fake1 volume, got %v", v) + } + + if _, err := s.Get("fake4"); err != ErrNoSuchVolume { + t.Fatalf("Expected ErrNoSuchVolume error, got %v", err) + } +} + +func TestCreate(t *testing.T) { + volumedrivers.Register(vt.FakeDriver{}, "fake") + s := New() + v, err := s.Create("fake1", "fake", nil) + if err != nil { + t.Fatal(err) + } + if v.Name() != "fake1" { + t.Fatalf("Expected fake1 volume, got %v", v) + } + if l := s.List(); len(l) != 1 { + t.Fatalf("Expected 1 volume in the store, got %v: %v", len(l), l) + } + + if _, err := s.Create("none", "none", nil); err == nil { + t.Fatalf("Expected unknown driver error, got nil") + } + + _, err = s.Create("fakeError", "fake", map[string]string{"error": "create error"}) + if err == nil || err.Error() != "create error" { + t.Fatalf("Expected create error, got %v", err) + } +} + +func TestRemove(t *testing.T) { + volumedrivers.Register(vt.FakeDriver{}, "fake") + s := New() + if err := s.Remove(vt.NoopVolume{}); err != ErrNoSuchVolume { + t.Fatalf("Expected ErrNoSuchVolume error, got %v", err) + } + v, err := s.Create("fake1", "fake", nil) + if err != nil { + t.Fatal(err) + } + s.Increment(v) + if err := s.Remove(v); err != ErrVolumeInUse { + t.Fatalf("Expected ErrVolumeInUse error, got %v", err) + } + s.Decrement(v) + if err := s.Remove(v); err != nil { + t.Fatal(err) + } + if l := s.List(); len(l) != 0 { + t.Fatalf("Expected 0 volumes in the store, got %v, %v", len(l), l) + } +} + +func TestIncrement(t *testing.T) { + s := New() + v := vt.NewFakeVolume("fake1") + s.Increment(v) + if l := s.List(); len(l) != 1 { + t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) + } + if c := s.Count(v); c != 1 { + t.Fatalf("Expected 1 counter, got %v", c) + } + + s.Increment(v) + if l := s.List(); len(l) != 1 { + t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) + } + if c := s.Count(v); c != 2 { + t.Fatalf("Expected 2 counter, got %v", c) + } + + v2 := vt.NewFakeVolume("fake2") + s.Increment(v2) + if l := s.List(); len(l) != 2 { + t.Fatalf("Expected 2 volume, got %v, %v", len(l), l) + } +} + +func TestDecrement(t *testing.T) { + s := New() + v := vt.NoopVolume{} + s.Decrement(v) + if c := s.Count(v); c != 0 { + t.Fatalf("Expected 0 volumes, got %v", c) + } + + s.Increment(v) + s.Increment(v) + s.Decrement(v) + if c := s.Count(v); c != 1 { + t.Fatalf("Expected 1 volume, got %v", c) + } + + s.Decrement(v) + if c := s.Count(v); c != 0 { + t.Fatalf("Expected 0 volumes, got %v", c) + } + + // Test counter cannot be negative. 
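+ // Decrement returns early once the count is already zero (the counter is a
+ // uint), so this extra call must leave the count at 0 instead of wrapping.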
+ s.Decrement(v) + if c := s.Count(v); c != 0 { + t.Fatalf("Expected 0 volumes, got %v", c) + } +} + +func TestFilterByDriver(t *testing.T) { + s := New() + + s.Increment(vt.NewFakeVolume("fake1")) + s.Increment(vt.NewFakeVolume("fake2")) + s.Increment(vt.NoopVolume{}) + + if l := s.FilterByDriver("fake"); len(l) != 2 { + t.Fatalf("Expected 2 volumes, got %v, %v", len(l), l) + } + + if l := s.FilterByDriver("noop"); len(l) != 1 { + t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) + } +} diff --git a/vendor/github.com/docker/docker/volume/store/store_unix.go b/vendor/github.com/docker/docker/volume/store/store_unix.go new file mode 100644 index 00000000..319c541d --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd + +package store + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. This is a no-op on Unix-like platforms +func normaliseVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/docker/docker/volume/store/store_windows.go b/vendor/github.com/docker/docker/volume/store/store_windows.go new file mode 100644 index 00000000..a42c1f84 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_windows.go @@ -0,0 +1,12 @@ +package store + +import "strings" + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. On Windows, as NTFS is case insensitive, under +// c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous. +// Hence we can't allow the volume "John" and "john" to be created as seperate +// volumes. +func normaliseVolumeName(name string) string { + return strings.ToLower(name) +} diff --git a/vendor/github.com/docker/docker/volume/testutils/testutils.go b/vendor/github.com/docker/docker/volume/testutils/testutils.go new file mode 100644 index 00000000..e1c75e91 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/testutils/testutils.go @@ -0,0 +1,68 @@ +package volumetestutils + +import ( + "fmt" + + "github.com/docker/docker/volume" +) + +// NoopVolume is a volume that doesn't perform any operation +type NoopVolume struct{} + +// Name is the name of the volume +func (NoopVolume) Name() string { return "noop" } + +// DriverName is the name of the driver +func (NoopVolume) DriverName() string { return "noop" } + +// Path is the filesystem path to the volume +func (NoopVolume) Path() string { return "noop" } + +// Mount mounts the volume in the container +func (NoopVolume) Mount() (string, error) { return "noop", nil } + +// Unmount unmounts the volume from the container +func (NoopVolume) Unmount() error { return nil } + +// FakeVolume is a fake volume with a random name +type FakeVolume struct { + name string +} + +// NewFakeVolume creates a new fake volume for testing +func NewFakeVolume(name string) volume.Volume { + return FakeVolume{name: name} +} + +// Name is the name of the volume +func (f FakeVolume) Name() string { return f.name } + +// DriverName is the name of the driver +func (FakeVolume) DriverName() string { return "fake" } + +// Path is the filesystem path to the volume +func (FakeVolume) Path() string { return "fake" } + +// Mount mounts the volume in the container +func (FakeVolume) Mount() (string, error) { return "fake", nil } + +// Unmount unmounts the volume from the container +func (FakeVolume) Unmount() error { return nil } + +// FakeDriver is a driver that generates fake volumes +type FakeDriver struct{} + +// Name is the name of the 
driver +func (FakeDriver) Name() string { return "fake" } + +// Create initializes a fake volume. +// It returns an error if the options include an "error" key with a message +func (FakeDriver) Create(name string, opts map[string]string) (volume.Volume, error) { + if opts != nil && opts["error"] != "" { + return nil, fmt.Errorf(opts["error"]) + } + return NewFakeVolume(name), nil +} + +// Remove deletes a volume. +func (FakeDriver) Remove(v volume.Volume) error { return nil } diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go index 19c9d77a..98f90f06 100644 --- a/vendor/github.com/docker/docker/volume/volume.go +++ b/vendor/github.com/docker/docker/volume/volume.go @@ -1,5 +1,15 @@ package volume +import ( + "os" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/system" +) + // DefaultDriverName is the driver name used for the driver // implemented in the local package. const DefaultDriverName string = "local" @@ -9,7 +19,7 @@ type Driver interface { // Name returns the name of the volume driver. Name() string // Create makes a new volume with the given id. - Create(string) (Volume, error) + Create(name string, opts map[string]string) (Volume, error) // Remove deletes the volume. Remove(Volume) error } @@ -29,33 +39,134 @@ type Volume interface { Unmount() error } -// read-write modes -var rwModes = map[string]bool{ - "rw": true, - "rw,Z": true, - "rw,z": true, - "z,rw": true, - "Z,rw": true, - "Z": true, - "z": true, +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. +type MountPoint struct { + Source string // Container host directory + Destination string // Inside the container + RW bool // True if writable + Name string // Name set by user + Driver string // Volume driver to use + Volume Volume `json:"-"` + + // Note Mode is not used on Windows + Mode string `json:"Relabel"` // Originally field was `Relabel`" } -// read-only modes -var roModes = map[string]bool{ - "ro": true, - "ro,Z": true, - "ro,z": true, - "z,ro": true, - "Z,ro": true, +// Setup sets up a mount point by either mounting the volume if it is +// configured, or creating the source directory if supplied. +func (m *MountPoint) Setup() (string, error) { + if m.Volume != nil { + return m.Volume.Mount() + } + if len(m.Source) > 0 { + if _, err := os.Stat(m.Source); err != nil { + if !os.IsNotExist(err) { + return "", err + } + if runtime.GOOS != "windows" { // Windows does not have deprecation issues here + logrus.Warnf("Auto-creating non-existant volume host path %s, this is deprecated and will be removed soon", m.Source) + if err := system.MkdirAll(m.Source, 0755); err != nil { + return "", err + } + } + } + return m.Source, nil + } + return "", derr.ErrorCodeMountSetup } -// ValidateMountMode will make sure the mount mode is valid. -// returns if it's a valid mount mode and if it's read-write or not. -func ValidateMountMode(mode string) (bool, bool) { - return roModes[mode] || rwModes[mode], rwModes[mode] +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source } -// ReadWrite tells you if a mode string is a valid read-only mode or not. +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. 
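+//
+// For example, with the Unix mode tables from volume_unix.go:
+//
+//   ValidMountMode("rw")   // true
+//   ValidMountMode("ro,Z") // true (the mode is lower-cased before the lookup)
+//   ValidMountMode("sw")   // false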
+func ValidMountMode(mode string) bool { + return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. func ReadWrite(mode string) bool { - return rwModes[mode] + return rwModes[strings.ToLower(mode)] +} + +// ParseVolumesFrom ensure that the supplied volumes-from is valid. +func ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", derr.ErrorCodeVolumeFromBlank.WithArgs(spec) + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !ValidMountMode(mode) { + return "", "", derr.ErrorCodeVolumeInvalidMode.WithArgs(mode) + } + } + return id, mode, nil +} + +// SplitN splits raw into a maximum of n parts, separated by a separator colon. +// A separator colon is the last `:` character in the regex `[/:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). +// This allows to correctly split strings such as `C:\foo:D:\:rw`. +func SplitN(raw string, n int) []string { + var array []string + if len(raw) == 0 || raw[0] == ':' { + // invalid + return nil + } + // numberOfParts counts the number of parts separated by a separator colon + numberOfParts := 0 + // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. + left := 0 + // right represents the right-most cursor in raw incremented with the loop. Note this + // starts at index 1 as index 0 is already handle above as a special case. + for right := 1; right < len(raw); right++ { + // stop parsing if reached maximum number of parts + if n >= 0 && numberOfParts >= n { + break + } + if raw[right] != ':' { + continue + } + potentialDriveLetter := raw[right-1] + if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { + if right > 1 { + beforePotentialDriveLetter := raw[right-2] + if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '/' && beforePotentialDriveLetter != '\\' { + // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. + } + // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. + } else { + // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + } + // need to take care of the last part + if left < len(raw) { + if n >= 0 && numberOfParts >= n { + // if the maximum number of parts is reached, just append the rest to the last part + // left-1 is at the last `:` that needs to be included since not considered a separator. 
+ array[n-1] += raw[left-1:] + } else { + array = append(array, raw[left:]) + } + } + return array } diff --git a/vendor/github.com/docker/docker/volume/volume_test.go b/vendor/github.com/docker/docker/volume/volume_test.go new file mode 100644 index 00000000..2ee62e63 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_test.go @@ -0,0 +1,261 @@ +package volume + +import ( + "runtime" + "strings" + "testing" +) + +func TestParseMountSpec(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + if runtime.GOOS == "windows" { + valid = []string{ + `d:\`, + `d:`, + `d:\path`, + `d:\path with space`, + // TODO Windows post TP4 - readonly support `d:\pathandmode:ro`, + `c:\:d:\`, + `c:\windows\:d:`, + `c:\windows:d:\s p a c e`, + `c:\windows:d:\s p a c e:RW`, + `c:\program files:d:\s p a c e i n h o s t d i r`, + `0123456789name:d:`, + `MiXeDcAsEnAmE:d:`, + `name:D:`, + `name:D::rW`, + `name:D::RW`, + // TODO Windows post TP4 - readonly support `name:D::RO`, + `c:/:d:/forward/slashes/are/good/too`, + // TODO Windows post TP4 - readonly support `c:/:d:/including with/spaces:ro`, + `c:\Windows`, // With capital + `c:\Program Files (x86)`, // With capitals and brackets + } + invalid = map[string]string{ + ``: "Invalid volume specification: ", + `.`: "Invalid volume specification: ", + `..\`: "Invalid volume specification: ", + `c:\:..\`: "Invalid volume specification: ", + `c:\:d:\:xyzzy`: "Invalid volume specification: ", + `c:`: "cannot be c:", + `c:\`: `cannot be c:\`, + `c:\notexist:d:`: `The system cannot find the file specified`, + `c:\windows\system32\ntdll.dll:d:`: `Source 'c:\windows\system32\ntdll.dll' is not a directory`, + `name<:d:`: `Invalid volume specification`, + `name>:d:`: `Invalid volume specification`, + `name::d:`: `Invalid volume specification`, + `name":d:`: `Invalid volume specification`, + `name\:d:`: `Invalid volume specification`, + `name*:d:`: `Invalid volume specification`, + `name|:d:`: `Invalid volume specification`, + `name?:d:`: `Invalid volume specification`, + `name/:d:`: `Invalid volume specification`, + `d:\pathandmode:rw`: `Invalid volume specification`, + `con:d:`: `cannot be a reserved word for Windows filenames`, + `PRN:d:`: `cannot be a reserved word for Windows filenames`, + `aUx:d:`: `cannot be a reserved word for Windows filenames`, + `nul:d:`: `cannot be a reserved word for Windows filenames`, + `com1:d:`: `cannot be a reserved word for Windows filenames`, + `com2:d:`: `cannot be a reserved word for Windows filenames`, + `com3:d:`: `cannot be a reserved word for Windows filenames`, + `com4:d:`: `cannot be a reserved word for Windows filenames`, + `com5:d:`: `cannot be a reserved word for Windows filenames`, + `com6:d:`: `cannot be a reserved word for Windows filenames`, + `com7:d:`: `cannot be a reserved word for Windows filenames`, + `com8:d:`: `cannot be a reserved word for Windows filenames`, + `com9:d:`: `cannot be a reserved word for Windows filenames`, + `lpt1:d:`: `cannot be a reserved word for Windows filenames`, + `lpt2:d:`: `cannot be a reserved word for Windows filenames`, + `lpt3:d:`: `cannot be a reserved word for Windows filenames`, + `lpt4:d:`: `cannot be a reserved word for Windows filenames`, + `lpt5:d:`: `cannot be a reserved word for Windows filenames`, + `lpt6:d:`: `cannot be a reserved word for Windows filenames`, + `lpt7:d:`: `cannot be a reserved word for Windows filenames`, + `lpt8:d:`: `cannot be a reserved word for Windows filenames`, + `lpt9:d:`: `cannot be a reserved word for Windows 
filenames`, + } + + } else { + valid = []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + } + invalid = map[string]string{ + "": "Invalid volume specification", + "./": "Invalid volume destination", + "../": "Invalid volume destination", + "/:../": "Invalid volume destination", + "/:path": "Invalid volume destination", + ":": "Invalid volume specification", + "/tmp:": "Invalid volume destination", + ":test": "Invalid volume specification", + ":/test": "Invalid volume specification", + "tmp:": "Invalid volume destination", + ":test:": "Invalid volume specification", + "::": "Invalid volume specification", + ":::": "Invalid volume specification", + "/tmp:::": "Invalid volume specification", + ":/tmp::": "Invalid volume specification", + "/path:rw": "Invalid volume specification", + "/path:ro": "Invalid volume specification", + "/rw:rw": "Invalid volume specification", + "path:ro": "Invalid volume specification", + "/path:/path:sw": `invalid mode: "sw"`, + "/path:/path:rwz": `invalid mode: "rwz"`, + } + } + + for _, path := range valid { + if _, err := ParseMountSpec(path, "local"); err != nil { + t.Fatalf("ParseMountSpec(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ParseMountSpec(path, "local"); err == nil { + t.Fatalf("ParseMountSpec(`%q`) should have failed validation. Err %v", path, err) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountSpec(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} + +func TestSplitN(t *testing.T) { + for _, x := range []struct { + input string + n int + expected []string + }{ + {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, + {`:C:\foo:d:`, -1, nil}, + {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, + {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, + {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, + + {`d:\`, -1, []string{`d:\`}}, + {`d:`, -1, []string{`d:`}}, + {`d:\path`, -1, []string{`d:\path`}}, + {`d:\path with space`, -1, []string{`d:\path with space`}}, + {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, + {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, + {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, + {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, + {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, + {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, + {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, + {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, + {`name:D:`, -1, []string{`name`, `D:`}}, + {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, + {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, + {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, + {`c:\Windows`, -1, []string{`c:\Windows`}}, + {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, + + {``, -1, nil}, + {`.`, -1, []string{`.`}}, + {`..\`, -1, []string{`..\`}}, + {`c:\:..\`, -1, []string{`c:\`, `..\`}}, + {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, + } { + res := SplitN(x.input, x.n) + if len(res) < len(x.expected) { + t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + for i, e := range res { + if e != x.expected[i] { + 
t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + } + } +} + +// testParseMountSpec is a structure used by TestParseMountSpecSplit for +// specifying test cases for the ParseMountSpec() function. +type testParseMountSpec struct { + bind string + driver string + expDest string + expSource string + expName string + expDriver string + expRW bool + fail bool +} + +func TestParseMountSpecSplit(t *testing.T) { + var cases []testParseMountSpec + if runtime.GOOS == "windows" { + cases = []testParseMountSpec{ + {`c:\:d:`, "local", `d:`, `c:\`, ``, "", true, false}, + {`c:\:d:\`, "local", `d:\`, `c:\`, ``, "", true, false}, + // TODO Windows post TP4 - Add readonly support {`c:\:d:\:ro`, "local", `d:\`, `c:\`, ``, "", false, false}, + {`c:\:d:\:rw`, "local", `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:foo`, "local", `d:\`, `c:\`, ``, "", false, true}, + {`name:d::rw`, "local", `d:`, ``, `name`, "local", true, false}, + {`name:d:`, "local", `d:`, ``, `name`, "local", true, false}, + // TODO Windows post TP4 - Add readonly support {`name:d::ro`, "local", `d:`, ``, `name`, "local", false, false}, + {`name:c:`, "", ``, ``, ``, "", true, true}, + {`driver/name:c:`, "", ``, ``, ``, "", true, true}, + } + } else { + cases = []testParseMountSpec{ + {"/tmp:/tmp1", "", "/tmp1", "/tmp", "", "", true, false}, + {"/tmp:/tmp2:ro", "", "/tmp2", "/tmp", "", "", false, false}, + {"/tmp:/tmp3:rw", "", "/tmp3", "/tmp", "", "", true, false}, + {"/tmp:/tmp4:foo", "", "", "", "", "", false, true}, + {"name:/named1", "", "/named1", "", "name", "local", true, false}, + {"name:/named2", "external", "/named2", "", "name", "external", true, false}, + {"name:/named3:ro", "local", "/named3", "", "name", "local", false, false}, + {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "local", true, false}, + {"/tmp:tmp", "", "", "", "", "", true, true}, + } + } + + for _, c := range cases { + m, err := ParseMountSpec(c.bind, c.driver) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) + } + continue + } + + if m == nil || err != nil { + t.Fatalf("ParseMountSpec failed for spec %s driver %s error %v\n", c.bind, c.driver, err.Error()) + continue + } + + if m.Destination != c.expDest { + t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind) + } + + if m.Source != c.expSource { + t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind) + } + + if m.Name != c.expName { + t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind) + } + + if m.Driver != c.expDriver { + t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind) + } + + if m.RW != c.expRW { + t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go new file mode 100644 index 00000000..8c98a3d9 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_unix.go @@ -0,0 +1,132 @@ +// +build linux freebsd darwin + +package volume + +import ( + "fmt" + "path/filepath" + "strings" + + derr "github.com/docker/docker/errors" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "rw,Z": true, + "rw,z": true, + "z,rw": true, + "Z,rw": true, + "Z": true, + "z": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, + "ro,Z": true, + "ro,z": true, + "z,ro": true, + "Z,ro": true, +} + +// BackwardsCompatible 
decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *MountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +// HasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. +func (m *MountPoint) HasResource(absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +// ParseMountSpec validates the configuration of mount information is valid. +func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) { + spec = filepath.ToSlash(spec) + + mp := &MountPoint{ + RW: true, + } + if strings.Count(spec, ":") > 2 { + return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + } + + arr := strings.SplitN(spec, ":", 3) + if arr[0] == "" { + return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + } + + switch len(arr) { + case 1: + // Just a destination path in the container + mp.Destination = filepath.Clean(arr[0]) + case 2: + if isValid := ValidMountMode(arr[1]); isValid { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. eg /foo:rw + return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + } + // Host Source Path or Name + Destination + mp.Source = arr[0] + mp.Destination = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + mp.Source = arr[0] + mp.Destination = arr[1] + mp.Mode = arr[2] // Mode field is used by SELinux to decide whether to apply label + if !ValidMountMode(mp.Mode) { + return nil, derr.ErrorCodeVolumeInvalidMode.WithArgs(mp.Mode) + } + mp.RW = ReadWrite(mp.Mode) + default: + return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + } + + //validate the volumes destination path + mp.Destination = filepath.Clean(mp.Destination) + if !filepath.IsAbs(mp.Destination) { + return nil, derr.ErrorCodeVolumeAbs.WithArgs(mp.Destination) + } + + // Destination cannot be "/" + if mp.Destination == "/" { + return nil, derr.ErrorCodeVolumeSlash.WithArgs(spec) + } + + name, source := ParseVolumeSource(mp.Source) + if len(source) == 0 { + mp.Source = "" // Clear it out as we previously assumed it was not a name + mp.Driver = volumeDriver + if len(mp.Driver) == 0 { + mp.Driver = DefaultDriverName + } + } else { + mp.Source = filepath.Clean(source) + } + + mp.Name = name + + return mp, nil +} + +// ParseVolumeSource parses the origin sources that's mounted into the container. +// It returns a name and a source. It looks to see if the spec passed in +// is an absolute file. If it is, it assumes the spec is a source. If not, +// it assumes the spec is a name. +func ParseVolumeSource(spec string) (string, string) { + if !filepath.IsAbs(spec) { + return spec, "" + } + return "", spec +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. 
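+// On Unix-like platforms every name is accepted; the Windows implementation in
+// volume_windows.go additionally rejects names with invalid NTFS characters and
+// reserved device names such as "con" or "lpt1".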
+func IsVolumeNameValid(name string) (bool, error) { + return true, nil +} diff --git a/vendor/github.com/docker/docker/volume/volume_windows.go b/vendor/github.com/docker/docker/volume/volume_windows.go new file mode 100644 index 00000000..b6b3bbb0 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_windows.go @@ -0,0 +1,181 @@ +package volume + +import ( + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/errors" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. + // + + // RXHostDir is the first option of a source + RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` + // RXName is the second option of a source + RXName = `[^\\/:*?"<>|\r\n]+` + // RXReservedNames are reserved names not possible on Windows + RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // RXSource is the combined possiblities for a source + RXSource = `((?P((` + RXHostDir + `)|(` + RXName + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. + // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // RXDestination is the regex expression for the mount destination + RXDestination = `(?P([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + // + + // RXMode is the regex expression for the mode of the mount + RXMode = `(:(?P(?i)rw))?` + // Temporarily for TP4, disabling the use of ro as it's not supported yet + // in the platform. TODO Windows: `(:(?P(?i)ro|rw))?` + // mode (optional) + // - Hopefully self explanatory in comparison to above. + // - Colon is not in the capture group + // +) + +// ParseMountSpec validates the configuration of mount information is valid. +func ParseMountSpec(spec string, volumeDriver string) (*MountPoint, error) { + var specExp = regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) + + // Ensure in platform semantics for matching. The CLI will send in Unix semantics. 
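+ // (For instance, a spec written as `c:/source:d:/dest` on the CLI is
+ // lower-cased and converted to `c:\source:d:\dest` before matching.)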
+ match := specExp.FindStringSubmatch(filepath.FromSlash(strings.ToLower(spec))) + + // Must have something back + if len(match) == 0 { + return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + } + + // Pull out the sub expressions from the named capture groups + matchgroups := make(map[string]string) + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + + mp := &MountPoint{ + Source: matchgroups["source"], + Destination: matchgroups["destination"], + RW: true, + } + if strings.ToLower(matchgroups["mode"]) == "ro" { + mp.RW = false + } + + // Volumes cannot include an explicitly supplied mode eg c:\path:rw + if mp.Source == "" && mp.Destination != "" && matchgroups["mode"] != "" { + return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) + } + + // Note: No need to check if destination is absolute as it must be by + // definition of matching the regex. + + if filepath.VolumeName(mp.Destination) == mp.Destination { + // Ensure the destination path, if a drive letter, is not the c drive + if strings.ToLower(mp.Destination) == "c:" { + return nil, derr.ErrorCodeVolumeDestIsC.WithArgs(spec) + } + } else { + // So we know the destination is a path, not drive letter. Clean it up. + mp.Destination = filepath.Clean(mp.Destination) + // Ensure the destination path, if a path, is not the c root directory + if strings.ToLower(mp.Destination) == `c:\` { + return nil, derr.ErrorCodeVolumeDestIsCRoot.WithArgs(spec) + } + } + + // See if the source is a name instead of a host directory + if len(mp.Source) > 0 { + validName, err := IsVolumeNameValid(mp.Source) + if err != nil { + return nil, err + } + if validName { + // OK, so the source is a name. + mp.Name = mp.Source + mp.Source = "" + + // Set the driver accordingly + mp.Driver = volumeDriver + if len(mp.Driver) == 0 { + mp.Driver = DefaultDriverName + } + } else { + // OK, so the source must be a host directory. Make sure it's clean. + mp.Source = filepath.Clean(mp.Source) + } + } + + // Ensure the host path source, if supplied, exists and is a directory + if len(mp.Source) > 0 { + var fi os.FileInfo + var err error + if fi, err = os.Stat(mp.Source); err != nil { + return nil, derr.ErrorCodeVolumeSourceNotFound.WithArgs(mp.Source, err) + } + if !fi.IsDir() { + return nil, derr.ErrorCodeVolumeSourceNotDirectory.WithArgs(mp.Source) + } + } + + logrus.Debugf("MP: Source '%s', Dest '%s', RW %t, Name '%s', Driver '%s'", mp.Source, mp.Destination, mp.RW, mp.Name, mp.Driver) + return mp, nil +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + nameExp := regexp.MustCompile(`^` + RXName + `$`) + if !nameExp.MatchString(name) { + return false, nil + } + nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) + if nameExp.MatchString(name) { + return false, derr.ErrorCodeVolumeNameReservedWord.WithArgs(name) + } + return true, nil +} diff --git a/vendor/github.com/docker/libcompose/MAINTAINERS b/vendor/github.com/docker/libcompose/MAINTAINERS index 2237e6a0..5ffbc8eb 100644 --- a/vendor/github.com/docker/libcompose/MAINTAINERS +++ b/vendor/github.com/docker/libcompose/MAINTAINERS @@ -1,6 +1,58 @@ -Aanand Prasad (@aanand) -Adrien Duermael (@aduermael) -Daniel Nephin (@dnephin) -Darren Shepherd (@ibuildthecloud) -Gaetan de Villele (@gdevillele) -Vincent Demeester (@vdemeester) +# Libcompose maintainers file +# +# This file describes who runs the docker/libcompose project and how. 
+# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aanand", + "aduermael", + "dnephin", + "ibuildthecloud", + "gdevillele", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aanand] + Name = "Aanand Prasad" + Email = "aanand@docker.com" + GitHub = "aanand" + + [people.aduermael] + Name = "Adrien Duermael" + Email = "adrien@docker.com" + GitHub = "aduermael" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.ibuildthecloud] + Name = "Darren Shepherd" + Email = "darren@rancher.com" + GitHub = "ibuildthecloud" + + [people.gdevillele] + Name = "Gaetan de Villele" + Email = "gaetan@docker.com" + GitHub = "gdevillele" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" diff --git a/vendor/github.com/docker/libcompose/docker/builder.go b/vendor/github.com/docker/libcompose/docker/builder.go index 1a1ec83e..1fd81ebf 100644 --- a/vendor/github.com/docker/libcompose/docker/builder.go +++ b/vendor/github.com/docker/libcompose/docker/builder.go @@ -136,10 +136,21 @@ func CreateTar(p *project.Project, name string) (io.ReadCloser, error) { return nil, fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile) } var includes = []string{"."} + var excludes []string - excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore")) + dockerIgnorePath := path.Join(root, ".dockerignore") + dockerIgnore, err := os.Open(dockerIgnorePath) if err != nil { - return nil, err + if !os.IsNotExist(err) { + return nil, err + } + logrus.Warnf("Error while reading .dockerignore (%s) : %s", dockerIgnorePath, err.Error()) + excludes = make([]string, 0) + } else { + excludes, err = utils.ReadDockerIgnore(dockerIgnore) + if err != nil { + return nil, err + } } // If .dockerignore mentions .dockerignore or the Dockerfile diff --git a/vendor/github.com/docker/libcompose/docker/container.go b/vendor/github.com/docker/libcompose/docker/container.go index deddb145..00f56be0 100644 --- a/vendor/github.com/docker/libcompose/docker/container.go +++ b/vendor/github.com/docker/libcompose/docker/container.go @@ -456,6 +456,7 @@ func (c *Container) Log() error { l := c.service.context.LoggerFactory.Create(c.name) err = c.client.Logs(dockerclient.LogsOptions{ + Container: c.name, Follow: true, Stdout: true, Stderr: true, @@ -464,6 +465,7 @@ func (c *Container) Log() error { ErrorStream: &logger.Wrapper{Logger: l, Err: true}, RawTerminal: info.Config.Tty, }) + logrus.WithFields(logrus.Fields{"Logger": l, "err": err}).Debug("c.client.Logs() returned error") return err } diff --git a/vendor/github.com/docker/libcompose/generate.go b/vendor/github.com/docker/libcompose/generate.go deleted file mode 100644 index 586cfca6..00000000 --- a/vendor/github.com/docker/libcompose/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package libcompose - -//go:generate mkdir -p vendor/github.com/docker/docker/autogen/dockerversion -//go:generate cp script/dockerversion vendor/github.com/docker/docker/autogen/dockerversion/dockerversion.go diff 
--git a/vendor/github.com/docker/libcompose/project/types_yaml.go b/vendor/github.com/docker/libcompose/project/types_yaml.go index 617063d3..4f4b2b1d 100644 --- a/vendor/github.com/docker/libcompose/project/types_yaml.go +++ b/vendor/github.com/docker/libcompose/project/types_yaml.go @@ -15,9 +15,6 @@ type Stringorslice struct { // MarshalYAML implements the Marshaller interface. func (s Stringorslice) MarshalYAML() (tag string, value interface{}, err error) { - if s.parts == nil { - return "", []string{}, nil - } return "", s.parts, nil } @@ -67,9 +64,6 @@ type Command struct { // MarshalYAML implements the Marshaller interface. func (s Command) MarshalYAML() (tag string, value interface{}, err error) { - if s.parts == nil { - return "", []string{}, nil - } return "", s.parts, nil } @@ -113,9 +107,6 @@ type SliceorMap struct { // MarshalYAML implements the Marshaller interface. func (s SliceorMap) MarshalYAML() (tag string, value interface{}, err error) { - if s.parts == nil { - return "", map[string]string{}, nil - } return "", s.parts, nil } @@ -169,9 +160,6 @@ type MaporEqualSlice struct { // MarshalYAML implements the Marshaller interface. func (s MaporEqualSlice) MarshalYAML() (tag string, value interface{}, err error) { - if s.parts == nil { - return "", []string{}, nil - } return "", s.parts, nil } @@ -214,9 +202,6 @@ type MaporColonSlice struct { // MarshalYAML implements the Marshaller interface. func (s MaporColonSlice) MarshalYAML() (tag string, value interface{}, err error) { - if s.parts == nil { - return "", []string{}, nil - } return "", s.parts, nil } @@ -259,9 +244,6 @@ type MaporSpaceSlice struct { // MarshalYAML implements the Marshaller interface. func (s MaporSpaceSlice) MarshalYAML() (tag string, value interface{}, err error) { - if s.parts == nil { - return "", []string{}, nil - } return "", s.parts, nil } diff --git a/vendor/github.com/docker/libcompose/project/types_yaml_test.go b/vendor/github.com/docker/libcompose/project/types_yaml_test.go index b035e4a2..d49a2b2d 100644 --- a/vendor/github.com/docker/libcompose/project/types_yaml_test.go +++ b/vendor/github.com/docker/libcompose/project/types_yaml_test.go @@ -120,6 +120,11 @@ type StructSliceorMap struct { Bars []string `yaml:"bars"` } +type StructCommand struct { + Entrypoint Command `yaml:"entrypoint,flow,omitempty"` + Command Command `yaml:"command,flow,omitempty"` +} + func TestSliceOrMapYaml(t *testing.T) { str := `{foos: [bar=baz, far=faz]}` @@ -192,3 +197,24 @@ func TestMaporsliceYaml(t *testing.T) { assert.True(t, contains(s2.Foo.parts, "bar=baz")) assert.True(t, contains(s2.Foo.parts, "far=faz")) } + +var sampleStructCommand = `command: bash` + +func TestUnmarshalCommand(t *testing.T) { + s := &StructCommand{} + err := yaml.Unmarshal([]byte(sampleStructCommand), s) + + assert.Nil(t, err) + assert.Equal(t, []string{"bash"}, s.Command.Slice()) + assert.Nil(t, s.Entrypoint.Slice()) + + bytes, err := yaml.Marshal(s) + assert.Nil(t, err) + + s2 := &StructCommand{} + err = yaml.Unmarshal(bytes, s2) + + assert.Nil(t, err) + assert.Equal(t, []string{"bash"}, s.Command.Slice()) + assert.Nil(t, s.Entrypoint.Slice()) +} diff --git a/vendor/github.com/docker/libnetwork/.gitignore b/vendor/github.com/docker/libnetwork/.gitignore index c03c9653..0e0d4892 100644 --- a/vendor/github.com/docker/libnetwork/.gitignore +++ b/vendor/github.com/docker/libnetwork/.gitignore @@ -4,6 +4,7 @@ *.so # Folders +integration-tmp/ _obj _test @@ -22,12 +23,14 @@ _testmain.go *.exe *.test *.prof +cmd/dnet/dnet # Coverage *.tmp 
*.coverprofile -# IDE files +# IDE files and folders .project +.settings/ libnetwork-build.created diff --git a/vendor/github.com/docker/libnetwork/Makefile b/vendor/github.com/docker/libnetwork/Makefile index deb510cf..b1eabf52 100644 --- a/vendor/github.com/docker/libnetwork/Makefile +++ b/vendor/github.com/docker/libnetwork/Makefile @@ -1,14 +1,25 @@ -.PHONY: all all-local build build-local check check-code check-format run-tests check-local install-deps coveralls circle-ci +.PHONY: all all-local build build-local check check-code check-format run-tests check-local integration-tests install-deps coveralls circle-ci start-services clean SHELL=/bin/bash -build_image=libnetwork-build +build_image=libnetworkbuild dockerargs = --privileged -v $(shell pwd):/go/src/github.com/docker/libnetwork -w /go/src/github.com/docker/libnetwork container_env = -e "INSIDECONTAINER=-incontainer=true" -docker = docker run --rm ${dockerargs} ${container_env} ${build_image} +docker = docker run --rm -it ${dockerargs} ${container_env} ${build_image} ciargs = -e "COVERALLS_TOKEN=$$COVERALLS_TOKEN" -e "INSIDECONTAINER=-incontainer=true" cidocker = docker run ${ciargs} ${dockerargs} golang:1.4 -all: ${build_image}.created - ${docker} make all-local +all: ${build_image}.created build check integration-tests clean + +integration-tests: ./cmd/dnet/dnet + @./test/integration/dnet/run-integration-tests.sh + +./cmd/dnet/dnet: + make build + +clean: + @if [ -e ./cmd/dnet/dnet ]; then \ + echo "Removing dnet binary"; \ + rm -rf ./cmd/dnet/dnet; \ + fi all-local: check-local build-local @@ -19,13 +30,16 @@ ${build_image}.created: touch ${build_image}.created build: ${build_image}.created - ${docker} make build-local + @echo "Building code... " + @${docker} ./wrapmake.sh build-local + @echo "Done building code" build-local: - $(shell which godep) go build -tags libnetwork_discovery ./... + @$(shell which godep) go build ./... + @$(shell which godep) go build -o ./cmd/dnet/dnet ./cmd/dnet check: ${build_image}.created - ${docker} make check-local + @${docker} ./wrapmake.sh check-local check-code: @echo "Checking code... " @@ -49,18 +63,18 @@ run-tests: ret=$$? 
;\ if [ $$ret -ne 0 ]; then exit $$ret; fi ;\ popd &> /dev/null; \ - if [ -f $$dir/profile.tmp ]; then \ - cat $$dir/profile.tmp | tail -n +2 >> coverage.coverprofile ; \ + if [ -f $$dir/profile.tmp ]; then \ + cat $$dir/profile.tmp | tail -n +2 >> coverage.coverprofile ; \ rm $$dir/profile.tmp ; \ - fi ; \ - fi ; \ + fi ; \ + fi ; \ done @echo "Done running tests" -check-local: check-format check-code run-tests +check-local: check-format check-code start-services run-tests install-deps: - apt-get update && apt-get -y install iptables + apt-get update && apt-get -y install iptables zookeeperd git clone https://github.com/golang/tools /go/src/golang.org/x/tools go install golang.org/x/tools/cmd/vet go install golang.org/x/tools/cmd/goimports @@ -76,4 +90,8 @@ coveralls: # The following target is a workaround for this circle-ci: - @${cidocker} make install-deps check-local coveralls + @${cidocker} make install-deps build-local check-local coveralls + make integration-tests + +start-services: + service zookeeper start diff --git a/vendor/github.com/docker/libnetwork/README.md b/vendor/github.com/docker/libnetwork/README.md index 90fcbe01..aa3fcbce 100644 --- a/vendor/github.com/docker/libnetwork/README.md +++ b/vendor/github.com/docker/libnetwork/README.md @@ -17,59 +17,55 @@ There are many networking solutions available to suit a broad range of use-cases ```go - // Create a new controller instance - controller, err := libnetwork.New() - if err != nil { - return - } + // Select and configure the network driver + networkType := "bridge" - // Select and configure the network driver - networkType := "bridge" + // Create a new controller instance + driverOptions := options.Generic{} + genericOption := make(map[string]interface{}) + genericOption[netlabel.GenericData] = driverOptions + controller, err := libnetwork.New(config.OptionDriverConfig(networkType, genericOption)) + if err != nil { + return + } - driverOptions := options.Generic{} - genericOption := make(map[string]interface{}) - genericOption[netlabel.GenericData] = driverOptions - err := controller.ConfigureNetworkDriver(networkType, genericOption) - if err != nil { - return - } + // Create a network for containers to join. + // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can use. + network, err := controller.NewNetwork(networkType, "network1") + if err != nil { + return + } - // Create a network for containers to join. - // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make of - network, err := controller.NewNetwork(networkType, "network1") - if err != nil { - return - } + // For each new container: allocate IP and interfaces. The returned network + // settings will be used for container infos (inspect and such), as well as + // iptables rules for port publishing. This info is contained or accessible + // from the returned endpoint. + ep, err := network.CreateEndpoint("Endpoint1") + if err != nil { + return + } - // For each new container: allocate IP and interfaces. The returned network - // settings will be used for container infos (inspect and such), as well as - // iptables rules for port publishing. This info is contained or accessible - // from the returned endpoint. - ep, err := network.CreateEndpoint("Endpoint1") - if err != nil { - return - } + // Create the sandbox for the container. + // NewSandbox accepts Variadic optional arguments which libnetwork can use. 
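+ // (OptionHostname and OptionDomainname below are examples of such options;
+ // they set the hostname and domain name inside the sandbox.)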
+ sbx, err := controller.NewSandbox("container1", + libnetwork.OptionHostname("test"), + libnetwork.OptionDomainname("docker.io")) - // A container can join the endpoint by providing the container ID to the join - // api. - // Join accepts Variadic arguments which will be made use of by libnetwork and Drivers - err = ep.Join("container1", - libnetwork.JoinOptionHostname("test"), - libnetwork.JoinOptionDomainname("docker.io")) - if err != nil { - return - } + // A sandbox can join the endpoint via the join api. + err = ep.Join(sbx) + if err != nil { + return + } - // libnetwork client can check the endpoint's operational data via the Info() API - epInfo, err := ep.DriverInfo() - mapData, ok := epInfo[netlabel.PortMap] + // libnetwork client can check the endpoint's operational data via the Info() API + epInfo, err := ep.DriverInfo() + mapData, ok := epInfo[netlabel.PortMap] + if ok { + portMapping, ok := mapData.([]types.PortBinding) if ok { - portMapping, ok := mapData.([]netutils.PortBinding) - if ok { - fmt.Printf("Current port mapping for endpoint %s: %v", ep.Name(), portMapping) - } + fmt.Printf("Current port mapping for endpoint %s: %v", ep.Name(), portMapping) } - + } ``` #### Current Status Please watch this space for updates on the progress. @@ -85,4 +81,3 @@ Want to hack on libnetwork? [Docker's contributions guidelines](https://github.c ## Copyright and license Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. - diff --git a/vendor/github.com/docker/libnetwork/Vagrantfile b/vendor/github.com/docker/libnetwork/Vagrantfile new file mode 100644 index 00000000..da09602d --- /dev/null +++ b/vendor/github.com/docker/libnetwork/Vagrantfile @@ -0,0 +1,58 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +$consul=<