bump godeps and lots of fixes...

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2016-01-22 11:45:45 +01:00
parent f2aedbe15f
commit c07c167006
22 changed files with 830 additions and 679 deletions

Godeps/Godeps.json (generated)

@@ -17,168 +17,168 @@
 },
 {
 	"ImportPath": "github.com/docker/distribution",
-	"Comment": "v2.2.0-199-g0865082",
-	"Rev": "08650825fef9f21ea819972fb2ed875c0832a255"
+	"Comment": "v2.2.0-207-gcaa2001",
+	"Rev": "caa2001e1fa738e14be6ba5f89cd9d41aebcd204"
 },
 {
 	"ImportPath": "github.com/docker/docker/api",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/cliconfig",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/daemon/graphdriver",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/distribution",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/dockerversion",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/image",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/layer",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/opts",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/archive",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/chrootarchive",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/fileutils",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/homedir",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/httputils",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/idtools",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/ioutils",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/jsonlog",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/jsonmessage",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/mflag",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/parsers/kernel",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/pools",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/progress",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/promise",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/random",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/reexec",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/stringid",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/system",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/tarsum",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/term",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/useragent",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/pkg/version",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/reference",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/docker/registry",
-	"Comment": "v1.4.1-9391-g5537a92",
-	"Rev": "5537a92e450ea56e2002f83ff50bb70fdb2cc25e"
+	"Comment": "v1.4.1-9441-gc3a9ece",
+	"Rev": "c3a9ecedba41eef71e42bed33fc2b462b9ee1c6a"
 },
 {
 	"ImportPath": "github.com/docker/engine-api/types",


@@ -112,6 +112,18 @@ information about each option that appears later in this page.
         region: fr
         container: containername
         rootdirectory: /swift/object/name/prefix
+    oss:
+        accesskeyid: accesskeyid
+        accesskeysecret: accesskeysecret
+        region: OSS region name
+        endpoint: optional endpoint
+        internal: optional internal endpoint
+        bucket: OSS bucket
+        encrypt: optional data encryption setting
+        secure: optional ssl setting
+        chunksize: optional size value
+        rootdirectory: optional root directory
+    inmemory:  # This driver takes no parameters
     delete:
         enabled: false
     redirect:
@@ -355,7 +367,7 @@ Permitted values are `error`, `warn`, `info` and `debug`. The default is
     swift:
         username: username
         password: password
-        authurl: https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
+        authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
         tenant: tenantname
         tenantid: tenantid
         domain: domain name for Openstack Identity v3 API
@@ -364,6 +376,18 @@ Permitted values are `error`, `warn`, `info` and `debug`. The default is
         region: fr
         container: containername
        rootdirectory: /swift/object/name/prefix
+    oss:
+        accesskeyid: accesskeyid
+        accesskeysecret: accesskeysecret
+        region: OSS region name
+        endpoint: optional endpoint
+        internal: optional internal endpoint
+        bucket: OSS bucket
+        encrypt: optional data encryption setting
+        secure: optional ssl setting
+        chunksize: optional size value
+        rootdirectory: optional root directory
+    inmemory:
     delete:
         enabled: false
     cache:
@@ -378,9 +402,63 @@ Permitted values are `error`, `warn`, `info` and `debug`. The default is
         disable: false
 The storage option is **required** and defines which storage backend is in use.
-You must configure one backend; if you configure more, the registry returns an error.
+You must configure one backend; if you configure more, the registry returns an error. You can choose any of these backend storage drivers:
-If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use a S3, or Azure, backing data-store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255 characters). Failure to do so can result in the following error message:
+<table>
+  <tr>
+    <td><code>filesystem</code></td>
+    <td>Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications.
+    See the <a href="../storage-drivers/filesystem/">driver's reference documentation</a>.
+    </td>
+  </tr>
+  <tr>
+    <td><code>azure</code></td>
+    <td>Uses Microsoft's Azure Blob Storage.
+    See the <a href="../storage-drivers/azure/">driver's reference documentation</a>.
+    </td>
+  </tr>
+  <tr>
+    <td><code>gcs</code></td>
+    <td>Uses Google Cloud Storage.
+    See the <a href="../storage-drivers/gcs/">driver's reference documentation</a>.
+    </td>
+  </tr>
+  <tr>
+    <td><code>rados</code></td>
+    <td>Uses Ceph Object Storage.
+    See the <a href="../storage-drivers/rados/">driver's reference documentation</a>.
+    </td>
+  </tr>
+  <tr>
+    <td><code>s3</code></td>
+    <td>Uses Amazon's Simple Storage Service (S3).
+    See the <a href="../storage-drivers/s3/">driver's reference documentation</a>.
+    </td>
+  </tr>
+  <tr>
+    <td><code>swift</code></td>
+    <td>Uses Openstack Swift object storage.
+    See the <a href="../storage-drivers/swift/">driver's reference documentation</a>.
+    </td>
+  </tr>
+  <tr>
+    <td><code>oss</code></td>
+    <td>Uses Aliyun OSS for object storage.
+    See the <a href="../storage-drivers/oss/">driver's reference documentation</a>.
+    </td>
+  </tr>
+</table>
+For testing purposes only, you can use the [`inmemory` storage
+driver](storage-drivers/inmemory.md). If you would like to run a registry from
+volatile memory, use the [`filesystem` driver](storage-drivers/filesystem.md) on
+a ramdisk.
+If you are deploying a registry on Windows, be aware that a Windows volume
+mounted from the host is not recommended. Instead, you can use an S3 or Azure
+backing data store. If you do use a Windows volume, you must ensure that the
+`PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255
+characters). Failure to do so can result in the following error message:
     mkdir /XXX protocol error and your registry will not function properly.
@@ -446,7 +524,7 @@ The `redirect` subsection provides configuration for managing redirects from
 content backends. For backends that support it, redirecting is enabled by
 default. Certain deployment scenarios may prefer to route all data through the
 Registry, rather than redirecting to the backend. This may be more efficient
-when using a backend that is not colocated or when a registry instance is
+when using a backend that is not co-located or when a registry instance is
 doing aggressive caching.
 Redirects can be disabled by adding a single flag `disable`, set to `true`
@@ -455,459 +533,6 @@ under the `redirect` section:
     redirect:
         disable: true
### filesystem
The `filesystem` storage backend uses the local disk to store registry files. It
is ideal for development and may be appropriate for some small-scale production
applications.
This backend has a single, required `rootdirectory` parameter. The parameter
specifies the absolute path to a directory. The registry stores all its data
here so make sure there is adequate space available.
### azure
This storage backend uses Microsoft's Azure Blob Storage.
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>accountname</code>
</td>
<td>
yes
</td>
<td>
Azure account name.
</td>
</tr>
<tr>
<td>
<code>accountkey</code>
</td>
<td>
yes
</td>
<td>
Azure account key.
</td>
</tr>
<tr>
<td>
<code>container</code>
</td>
<td>
yes
</td>
<td>
Name of the Azure container into which to store data.
</td>
</tr>
<tr>
<td>
<code>realm</code>
</td>
<td>
no
</td>
<td>
Domain name suffix for the Storage Service API endpoint. By default, this
is <code>core.windows.net</code>.
</td>
</tr>
</table>
### gcs
This storage backend uses Google Cloud Storage.
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>bucket</code>
</td>
<td>
yes
</td>
<td>
Storage bucket name.
</td>
</tr>
<tr>
<td>
<code>keyfile</code>
</td>
<td>
no
</td>
<td>
A private service account key file in JSON format. Instead of a key file <a href="https://developers.google.com/identity/protocols/application-default-credentials">Google Application Default Credentials</a> can be used.
</td>
</tr>
<tr>
<td>
<code>rootdirectory</code>
</td>
<td>
no
</td>
<td>
This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary.
</tr>
</table>
### rados
This storage backend uses [Ceph Object Storage](http://ceph.com/docs/master/rados/).
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>poolname</code>
</td>
<td>
yes
</td>
<td>
Ceph pool name.
</td>
</tr>
<tr>
<td>
<code>username</code>
</td>
<td>
no
</td>
<td>
Ceph cluster user to connect as (i.e. admin, not client.admin).
</td>
</tr>
<tr>
<td>
<code>chunksize</code>
</td>
<td>
no
</td>
<td>
Size of the written RADOS objects. Default value is 4MB (4194304).
</td>
</tr>
</table>
### S3
This storage backend uses Amazon's Simple Storage Service (S3).
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>accesskey</code>
</td>
<td>
yes
</td>
<td>
Your AWS Access Key.
</td>
</tr>
<tr>
<td>
<code>secretkey</code>
</td>
<td>
yes
</td>
<td>
Your AWS Secret Key.
</td>
</tr>
<tr>
<td>
<code>region</code>
</td>
<td>
yes
</td>
<td>
The AWS region in which your bucket exists. For the moment, the Go AWS
library in use does not use the newer DNS based bucket routing.
</td>
</tr>
<tr>
<td>
<code>bucket</code>
</td>
<td>
yes
</td>
<td>
The bucket name in which you want to store the registry's data.
</td>
</tr>
<tr>
<td>
<code>encrypt</code>
</td>
<td>
no
</td>
<td>
Specifies whether the registry stores the image in encrypted format or
not. A boolean value. The default is false.
</td>
</tr>
<tr>
<td>
<code>secure</code>
</td>
<td>
no
</td>
<td>
Indicates whether to use HTTPS instead of HTTP. A boolean value. The
default is <code>true</code>.
</td>
</tr>
<tr>
<td>
<code>v4auth</code>
</td>
<td>
no
</td>
<td>
Indicates whether the registry uses Version 4 of AWS's authentication.
Generally, you should set this to <code>true</code>. By default, this is
<code>false</code>.
</td>
</tr>
<tr>
<td>
<code>chunksize</code>
</td>
<td>
no
</td>
<td>
The S3 API requires multipart upload chunks to be at least 5MB. This value
should be a number that is larger than 5*1024*1024.
</td>
</tr>
<tr>
<td>
<code>rootdirectory</code>
</td>
<td>
no
</td>
<td>
This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary.
</td>
</tr>
</table>
### Openstack Swift
This storage backend uses Openstack Swift object storage.
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>authurl</code>
</td>
<td>
yes
</td>
<td>
URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
</td>
</tr>
<tr>
<td>
<code>username</code>
</td>
<td>
yes
</td>
<td>
Your Openstack user name.
</td>
</tr>
<tr>
<td>
<code>password</code>
</td>
<td>
yes
</td>
<td>
Your Openstack password.
</td>
</tr>
<tr>
<td>
<code>region</code>
</td>
<td>
no
</td>
<td>
The Openstack region in which your container exists.
</td>
</tr>
<tr>
<td>
<code>container</code>
</td>
<td>
yes
</td>
<td>
The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization.
</td>
</tr>
<tr>
<td>
<code>tenant</code>
</td>
<td>
no
</td>
<td>
Your Openstack tenant name. You can either use <code>tenant</code> or <code>tenantid</code>.
</td>
</tr>
<tr>
<td>
<code>tenantid</code>
</td>
<td>
no
</td>
<td>
Your Openstack tenant id. You can either use <code>tenant</code> or <code>tenantid</code>.
</td>
</tr>
<tr>
<td>
<code>domain</code>
</td>
<td>
no
</td>
<td>
Your Openstack domain name for Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.
</td>
</tr>
<tr>
<td>
<code>domainid</code>
</td>
<td>
no
</td>
<td>
Your Openstack domain id for Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.
</td>
</tr>
<tr>
<td>
<code>trustid</code>
</td>
<td>
no
</td>
<td>
Your Openstack trust id for Identity v3 API.
</td>
</tr>
<tr>
<td>
<code>insecureskipverify</code>
</td>
<td>
no
</td>
<td>
true to skip TLS verification, false by default.
</td>
</tr>
<tr>
<td>
<code>chunksize</code>
</td>
<td>
no
</td>
<td>
Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M).
</td>
</tr>
<tr>
<td>
<code>prefix</code>
</td>
<td>
no
</td>
<td>
This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string which is the container's root.
</td>
</tr>
<tr>
<td>
<code>secretkey</code>
</td>
<td>
no
</td>
<td>
The secret key used to generate temporary URLs.
</td>
</tr>
<tr>
<td>
<code>accesskey</code>
</td>
<td>
no
</td>
<td>
The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter.
</td>
</tr>
</table>
 ## auth
@@ -1035,11 +660,12 @@ For more information about Token based authentication configuration, see the [sp
 ### htpasswd
 The _htpasswd_ authentication backend allows one to configure basic auth using an
-[Apache HTPasswd File](https://httpd.apache.org/docs/2.4/programs/htpasswd.html).
-Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are
-supported. Entries with other hash types will be ignored. The htpasswd file is
-loaded once, at startup. If the file is invalid, the registry will display an
-error and will not start.
+[Apache htpasswd
+file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). Only
+[`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are supported.
+Entries with other hash types will be ignored. The htpasswd file is loaded once,
+at startup. If the file is invalid, the registry will display an error and will
+not start.
 > __WARNING:__ This authentication scheme should only be used with TLS
 > configured, since basic authentication sends passwords as part of the http
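Because only bcrypt entries are honored, the file should be generated with bcrypt hashing, for example with `htpasswd -Bbn someuser somepassword`. A minimal Go sketch of producing a compatible entry with golang.org/x/crypto/bcrypt (illustrative only; the registry itself only reads the file):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash the password with bcrypt; entries using any other hash type
	// in the htpasswd file are ignored by the registry.
	hash, err := bcrypt.GenerateFromPassword([]byte("somepassword"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	// Each htpasswd line is "user:hash"; append this output to the file
	// that the auth.htpasswd.path option points at.
	fmt.Printf("someuser:%s\n", hash)
}
```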
@@ -1078,7 +704,7 @@ error and will not start.
 ## middleware
 The `middleware` option is **optional**. Use this option to inject middleware at
-named hook points. All middlewares must implement the same interface as the
+named hook points. All middleware must implement the same interface as the
 object they're wrapping. This means a registry middleware must implement the
 `distribution.Namespace` interface, repository middleware must implement
 `distribution.Repository`, and storage middleware must implement


@@ -9,16 +9,68 @@ keywords = ["registry, service, driver, images, storage, azure"]
 # Microsoft Azure storage driver
-An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage.
+An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/) for object storage.
 ## Parameters
-The following parameters must be used to authenticate and configure the storage driver (case-sensitive):
+<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>accountname</code>
</td>
<td>
yes
</td>
<td>
Name of the Azure Storage Account.
</td>
</tr>
<tr>
<td>
<code>accountkey</code>
</td>
<td>
yes
</td>
<td>
Primary or Secondary Key for the Storage Account.
</td>
</tr>
<tr>
<td>
<code>container</code>
</td>
<td>
yes
</td>
<td>
Name of the Azure root storage container in which all registry data will be stored. Must comply with the storage container name [requirements](https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx).
</td>
</tr>
<tr>
<td>
<code>realm</code>
</td>
<td>
no
</td>
<td>
Domain name suffix for the Storage Service API endpoint. For example, the realm for "Azure in China" would be <code>core.chinacloudapi.cn</code> and the realm for "Azure Government" would be <code>core.usgovcloudapi.net</code>. By default, this
is <code>core.windows.net</code>.
</td>
</tr>
-* `accountname`: Name of the Azure Storage Account.
-* `accountkey`: Primary or Secondary Key for the Storage Account.
-* `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api].
-* `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`.
-[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/
-[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
+</table>
+## Related Information
+* For more information about [Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/), visit the Microsoft website.
+* You can use Microsoft's [Blob Service REST API](https://msdn.microsoft.com/en-us/library/azure/dd135733.aspx) to [create a container](https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx).


@ -13,4 +13,6 @@ An implementation of the `storagedriver.StorageDriver` interface which uses the
 ## Parameters
-`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/var/lib/registry`.
+`rootdirectory`: (optional) The absolute path to a root directory tree in which
+to store all registry files. The registry stores all its data here so make sure
+there is adequate space available. Defaults to `/var/lib/registry`.


@ -13,6 +13,49 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Goog
## Parameters
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>bucket</code>
</td>
<td>
yes
</td>
<td>
Storage bucket name.
</td>
</tr>
<tr>
<td>
<code>keyfile</code>
</td>
<td>
no
</td>
<td>
A private service account key file in JSON format. Instead of a key file <a href="https://developers.google.com/identity/protocols/application-default-credentials">Google Application Default Credentials</a> can be used.
</td>
</tr>
<tr>
<td>
<code>rootdirectory</code>
</td>
<td>
no
</td>
<td>
This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary.
</td>
</tr>
</table>
`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization).
`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts).


@ -0,0 +1,7 @@
<!--[metadata]>
+++
draft=true
title = "List of storage drivers"
description = "Placeholder for redesign"
+++
<![end-metadata]-->


@ -7,11 +7,14 @@ keywords = ["registry, service, driver, images, storage, in-memory"]
 <![end-metadata]-->
-# In-memory storage driver
+# In-memory storage driver (Testing Only)
-An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage.
+For testing purposes only, you can use the `inmemory` storage driver. This
+driver is an implementation of the `storagedriver.StorageDriver` interface which
+uses local memory for object storage. If you would like to run a registry from
+volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk.
-**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing.
+**IMPORTANT**: This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production.
 ## Parameters


@ -12,22 +12,113 @@ An implementation of the `storagedriver.StorageDriver` interface which uses [Ali
 ## Parameters
-* `accesskeyid`: Your access key ID.
-* `accesskeysecret`: Your access key secret.
-* `region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>
-* `endpoint`: (optional) By default, the endpoint should be `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when internal=true). You can change the default endpoint via changing this value.
-* `internal`: (optional) Using internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>
-* `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization).
-* `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
-* `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true if not specified.
-* `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS.
-* `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
+<table>
+<tr>
+<th>Parameter</th>
+<th>Required</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>
+<code>accesskeyid</code>
+</td>
+<td>
+yes
+</td>
+<td>
+Your access key ID.
+</td>
+</tr>
+<tr>
+<td>
+<code>accesskeysecret</code>
+</td>
+<td>
+yes
+</td>
+<td>
+Your access key secret.
+</td>
+</tr>
<tr>
<td>
<code>region</code>
</td>
<td>
yes
</td>
<td> The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>
</td>
</tr>
<tr>
<td>
<code>endpoint</code>
</td>
<td>
no
</td>
<td>
An endpoint which defaults to `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can change the default endpoint by changing this value.
</td>
</tr>
<tr>
<td>
<code>internal</code>
</td>
<td>
no
</td>
<td> An internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at <http://docs.aliyun.com/#/oss/product-documentation/domain-region>
</td>
</tr>
<tr>
<td>
<code>bucket</code>
</td>
<td>
yes
</td>
<td> The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization).
</td>
</tr>
<tr>
<td>
<code>encrypt</code>
</td>
<td>
no
</td>
<td> Specifies whether you would like your data encrypted on the server side. Defaults to false if not specified.
</td>
</tr>
<tr>
<td>
<code>secure</code>
</td>
<td>
no
</td>
<td> Specifies whether to transfer data to the bucket over ssl or not. If you omit this value, `true` is used.
</td>
</tr>
<tr>
<td>
<code>chunksize</code>
</td>
<td>
no
</td>
<td> The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS.
</td>
</tr>
<tr>
<td>
<code>rootdirectory</code>
</td>
<td>
no
</td>
<td> The root directory tree in which to store all registry files. Defaults to an empty string (bucket root).
</td>
</tr>
</table>


@ -14,6 +14,49 @@ An implementation of the `storagedriver.StorageDriver` interface which uses
## Parameters
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>poolname</code>
</td>
<td>
yes
</td>
<td>
Ceph pool name.
</td>
</tr>
<tr>
<td>
<code>username</code>
</td>
<td>
no
</td>
<td>
Ceph cluster user to connect as (i.e. admin, not client.admin).
</td>
</tr>
<tr>
<td>
<code>chunksize</code>
</td>
<td>
no
</td>
<td>
Size of the written RADOS objects. Default value is 4MB (4194304).
</td>
</tr>
</table>
The following parameters must be used to configure the storage driver
(case-sensitive):


@ -13,6 +13,120 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Amaz
## Parameters
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>accesskey</code>
</td>
<td>
yes
</td>
<td>
Your AWS Access Key.
</td>
</tr>
<tr>
<td>
<code>secretkey</code>
</td>
<td>
yes
</td>
<td>
Your AWS Secret Key.
</td>
</tr>
<tr>
<td>
<code>region</code>
</td>
<td>
yes
</td>
<td>
The AWS region in which your bucket exists. For the moment, the Go AWS
library in use does not use the newer DNS based bucket routing.
</td>
</tr>
<tr>
<td>
<code>bucket</code>
</td>
<td>
yes
</td>
<td>
The bucket name in which you want to store the registry's data.
</td>
</tr>
<tr>
<td>
<code>encrypt</code>
</td>
<td>
no
</td>
<td>
Specifies whether the registry stores the image in encrypted format or
not. A boolean value. The default is false.
</td>
</tr>
<tr>
<td>
<code>secure</code>
</td>
<td>
no
</td>
<td>
Indicates whether to use HTTPS instead of HTTP. A boolean value. The
default is <code>true</code>.
</td>
</tr>
<tr>
<td>
<code>v4auth</code>
</td>
<td>
no
</td>
<td>
Indicates whether the registry uses Version 4 of AWS's authentication.
Generally, you should set this to <code>true</code>. By default, this is
<code>false</code>.
</td>
</tr>
<tr>
<td>
<code>chunksize</code>
</td>
<td>
no
</td>
<td>
The S3 API requires multipart upload chunks to be at least 5MB. This value
should be a number that is larger than 5*1024*1024.
</td>
</tr>
<tr>
<td>
<code>rootdirectory</code>
</td>
<td>
no
</td>
<td>
This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary.
</td>
</tr>
</table>
`accesskey`: Your aws access key.
`secretkey`: Your aws secret key.
@@ -80,4 +194,4 @@ middleware:
## CloudFront Key-Pair
A CloudFront key-pair is required for all AWS accounts needing access to your CloudFront distribution. For information, please see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs).


@ -13,6 +13,181 @@ An implementation of the `storagedriver.StorageDriver` interface that uses [Open
## Parameters
<table>
<tr>
<th>Parameter</th>
<th>Required</th>
<th>Description</th>
</tr>
<tr>
<td>
<code>authurl</code>
</td>
<td>
yes
</td>
<td>
URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth
</td>
</tr>
<tr>
<td>
<code>username</code>
</td>
<td>
yes
</td>
<td>
Your Openstack user name.
</td>
</tr>
<tr>
<td>
<code>password</code>
</td>
<td>
yes
</td>
<td>
Your Openstack password.
</td>
</tr>
<tr>
<td>
<code>region</code>
</td>
<td>
no
</td>
<td>
The Openstack region in which your container exists.
</td>
</tr>
<tr>
<td>
<code>container</code>
</td>
<td>
yes
</td>
<td>
The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization.
</td>
</tr>
<tr>
<td>
<code>tenant</code>
</td>
<td>
no
</td>
<td>
Your Openstack tenant name. You can either use <code>tenant</code> or <code>tenantid</code>.
</td>
</tr>
<tr>
<td>
<code>tenantid</code>
</td>
<td>
no
</td>
<td>
Your Openstack tenant id. You can either use <code>tenant</code> or <code>tenantid</code>.
</td>
</tr>
<tr>
<td>
<code>domain</code>
</td>
<td>
no
</td>
<td>
Your Openstack domain name for Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.
</td>
</tr>
<tr>
<td>
<code>domainid</code>
</td>
<td>
no
</td>
<td>
Your Openstack domain id for Identity v3 API. You can either use <code>domain</code> or <code>domainid</code>.
</td>
</tr>
<tr>
<td>
<code>trustid</code>
</td>
<td>
no
</td>
<td>
Your Openstack trust id for Identity v3 API.
</td>
</tr>
<tr>
<td>
<code>insecureskipverify</code>
</td>
<td>
no
</td>
<td>
true to skip TLS verification, false by default.
</td>
</tr>
<tr>
<td>
<code>chunksize</code>
</td>
<td>
no
</td>
<td>
Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M).
</td>
</tr>
<tr>
<td>
<code>prefix</code>
</td>
<td>
no
</td>
<td>
This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string which is the container's root.
</td>
</tr>
<tr>
<td>
<code>secretkey</code>
</td>
<td>
no
</td>
<td>
The secret key used to generate temporary URLs.
</td>
</tr>
<tr>
<td>
<code>accesskey</code>
</td>
<td>
no
</td>
<td>
The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter.
</td>
</tr>
</table>
<table>
<tr>
<td>


@@ -2,7 +2,7 @@ package distribution
 import (
 	"fmt"
-	"strings"
+	"mime"
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
@@ -84,19 +84,23 @@ var mappings = make(map[string]UnmarshalFunc, 0)
 // UnmarshalManifest looks up manifest unmarshal functions based on
 // MediaType
 func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
-	// Need to look up by the actual content type, not the raw contents of
+	// Need to look up by the actual media type, not the raw contents of
 	// the header. Strip semicolons and anything following them.
 	var mediatype string
-	semicolonIndex := strings.Index(ctHeader, ";")
-	if semicolonIndex != -1 {
-		mediatype = ctHeader[:semicolonIndex]
-	} else {
-		mediatype = ctHeader
-	}
+	if ctHeader != "" {
+		var err error
+		mediatype, _, err = mime.ParseMediaType(ctHeader)
+		if err != nil {
+			return nil, Descriptor{}, err
+		}
+	}
 	unmarshalFunc, ok := mappings[mediatype]
 	if !ok {
-		return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype)
+		unmarshalFunc, ok = mappings[""]
+		if !ok {
+			return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype)
+		}
 	}
 	return unmarshalFunc(p)
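Switching to `mime.ParseMediaType` means parameters such as `charset` are stripped and the type is normalized, instead of naively splitting on the first semicolon. A small standalone illustration of the stdlib behavior being relied on here:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// ParseMediaType splits off any ";"-separated parameters and
	// normalizes the media type itself to lowercase.
	mt, params, err := mime.ParseMediaType("application/vnd.docker.distribution.manifest.v1+prettyjws; charset=utf-8")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt)                // application/vnd.docker.distribution.manifest.v1+prettyjws
	fmt.Println(params["charset"]) // utf-8
}
```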


@@ -103,10 +103,16 @@ func (b *bridge) createManifestEvent(action string, repo string, sm distribution
 		return nil, err
 	}
+	// Ensure we have the canonical manifest descriptor here
+	_, desc, err := distribution.UnmarshalManifest(mt, p)
+	if err != nil {
+		return nil, err
+	}
 	event.Target.MediaType = mt
-	event.Target.Length = int64(len(p))
-	event.Target.Size = int64(len(p))
-	event.Target.Digest = digest.FromBytes(p)
+	event.Target.Length = desc.Size
+	event.Target.Size = desc.Size
+	event.Target.Digest = desc.Digest
 	event.Target.URL, err = b.ub.BuildManifestURL(repo, event.Target.Digest.String())
 	if err != nil {


@@ -23,7 +23,7 @@ func (cli *DockerCli) CmdUpdate(args ...string) error {
 	flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
 	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
 	flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit")
-	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
+	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
 	flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit")
 	cmd.Require(flag.Min, 1)


@@ -374,20 +374,10 @@ func (a *Driver) DiffPath(id string) (string, func() error, error) {
 }
 func (a *Driver) applyDiff(id string, diff archive.Reader) error {
-	dir := path.Join(a.rootPath(), "diff", id)
-	if err := chrootarchive.UntarUncompressed(diff, dir, &archive.TarOptions{
+	return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
 		UIDMaps: a.uidMaps,
 		GIDMaps: a.gidMaps,
-	}); err != nil {
-		return err
-	}
-	// show invalid whiteouts warning.
-	files, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir))
-	if err == nil && len(files) > 0 {
-		logrus.Warnf("Archive contains aufs hardlink references that are not supported.")
-	}
-	return nil
+	})
 }
 // DiffSize calculates the changes between the specified id
@@ -517,7 +507,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 	}
 	if firstMount {
-		opts := "dio,noplink,xino=/dev/shm/aufs.xino"
+		opts := "dio,xino=/dev/shm/aufs.xino"
 		if useDirperm() {
 			opts += ",dirperm1"
 		}


@@ -4,12 +4,12 @@ package overlay
 import (
 	"fmt"
-	"io"
 	"os"
 	"path/filepath"
 	"syscall"
 	"time"
+	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/system"
 )
@@ -32,7 +32,7 @@ func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
 	}
 	defer dstFile.Close()
-	_, err = io.Copy(dstFile, srcFile)
+	_, err = pools.Copy(dstFile, srcFile)
 	return err
 }
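Replacing `io.Copy` with `pools.Copy` avoids allocating a fresh scratch buffer for every file copied; docker's pkg/pools keeps reusable bufio readers and writers around. A rough standalone sketch of the buffer-reuse idea (this is not the actual pkg/pools implementation, just the pattern, with an assumed 32KB buffer size):

```go
package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

// bufPool hands out reusable 32KB copy buffers so that concurrent
// copies do not each allocate (and later garbage-collect) their own.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 32*1024) },
}

// pooledCopy behaves like io.Copy but draws its scratch buffer from
// the pool, returning it to the pool when the copy finishes.
func pooledCopy(dst io.Writer, src io.Reader) (int64, error) {
	buf := bufPool.Get().([]byte)
	defer bufPool.Put(buf)
	return io.CopyBuffer(dst, src, buf)
}

func main() {
	if _, err := pooledCopy(os.Stdout, strings.NewReader("hello from a pooled buffer\n")); err != nil {
		panic(err)
	}
}
```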


@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"net/url"
 	"strings"
+	"syscall"
 	"time"
 	"github.com/docker/distribution"
@@ -145,8 +146,14 @@ func retryOnError(err error) error {
 		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied:
 			return xfer.DoNotRetry{Err: err}
 		}
+	case *url.Error:
+		return retryOnError(v.Err)
 	case *client.UnexpectedHTTPResponseError:
 		return xfer.DoNotRetry{Err: err}
+	case error:
+		if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
+			return xfer.DoNotRetry{Err: err}
+		}
 	}
 	// let's be nice and fallback if the error is a completely
 	// unexpected one.
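The two new cases make the retry logic unwrap `*url.Error` so the underlying cause is classified rather than the transport wrapper, and treat "no space left on device" as permanent. A self-contained sketch of the same classification pattern (`doNotRetry` here is a stand-in for the real `xfer.DoNotRetry` type):

```go
package main

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
	"syscall"
)

// doNotRetry marks an error as permanent (stand-in for xfer.DoNotRetry).
type doNotRetry struct{ err error }

func (e doNotRetry) Error() string { return e.err.Error() }

// classify mirrors the shape of retryOnError: unwrap transport wrappers,
// then give up on errors that retrying cannot fix.
func classify(err error) error {
	switch v := err.(type) {
	case *url.Error:
		// A *url.Error wraps the real cause; classify that instead.
		return classify(v.Err)
	case error:
		// Running out of disk space will not go away on retry.
		if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
			return doNotRetry{err: err}
		}
	}
	return err // anything else is considered retryable
}

func main() {
	err := &url.Error{
		Op:  "Get",
		URL: "https://registry.example.com/v2/",
		Err: errors.New("write /var/lib/registry/tmp: no space left on device"),
	}
	// The wrapped ENOSPC condition is classified as permanent.
	fmt.Printf("%T: %v\n", classify(err), classify(err))
}
```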


@@ -1163,7 +1163,7 @@ func (fs *FlagSet) ReportError(str string, withHelp bool) {
 			str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
 		}
 	}
-	fmt.Fprintf(fs.Out(), "docker: %s.\n", str)
+	fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str)
 }
 // Parsed reports whether fs.Parse has been called.


@@ -109,7 +109,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
 			keyName := certName[:len(certName)-5] + ".key"
 			logrus.Debugf("cert: %s", filepath.Join(directory, f.Name()))
 			if !hasFile(fs, keyName) {
-				return fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
+				return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName)
 			}
 			cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName))
 			if err != nil {
@@ -122,7 +122,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
 			certName := keyName[:len(keyName)-4] + ".cert"
 			logrus.Debugf("key: %s", filepath.Join(directory, f.Name()))
 			if !hasFile(fs, certName) {
-				return fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
+				return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName)
 			}
 		}
 	}
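The reworded errors encode the directory convention: every client certificate `foo.cert` must sit next to a `foo.key`, while CA certificates are expected to use the `.crt` extension and need no key. A simplified sketch of that pairing check (the real function also loads the key pairs and CA pool; the helper and example path below are illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// checkCertPairs scans a certs.d-style directory and verifies that every
// client certificate (*.cert) has a private key (*.key) beside it.
// CA certificates (*.crt) need no key and are skipped.
func checkCertPairs(directory string) error {
	fs, err := ioutil.ReadDir(directory)
	if err != nil {
		return err
	}
	names := map[string]bool{}
	for _, f := range fs {
		names[f.Name()] = true
	}
	for name := range names {
		if strings.HasSuffix(name, ".cert") {
			key := strings.TrimSuffix(name, ".cert") + ".key"
			if !names[key] {
				return fmt.Errorf("missing key %s for client certificate %s", key, name)
			}
		}
	}
	return nil
}

func main() {
	if err := checkCertPairs("/etc/docker/certs.d/myregistry:5000"); err != nil {
		fmt.Println(err)
	}
}
```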


@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
+	"strings"
 	"time"
 	"github.com/Sirupsen/logrus"
@@ -13,7 +14,6 @@ import (
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/opts"
 	versionPkg "github.com/docker/docker/pkg/version"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
@@ -97,15 +97,16 @@ func getData(c *cli.Context, ref reference.Named) (*imageInspect, error) {
 	if err := validateRepoName(repoInfo.Name()); err != nil {
 		return nil, err
 	}
-	options := &registry.Options{}
-	options.InsecureRegistries = opts.NewListOpts(nil)
-	options.Mirrors = opts.NewListOpts(nil)
-	options.InsecureRegistries.Set("0.0.0.0/0")
-	registryService := registry.NewService(options)
-	// TODO(runcom): hacky, provide a way of passing tls cert (flag?) to be used to lookup
-	for _, ic := range registryService.Config.IndexConfigs {
-		ic.Secure = false
-	}
+	//options := &registry.Options{}
+	//options.Mirrors = opts.NewListOpts(nil)
+	//options.InsecureRegistries = opts.NewListOpts(nil)
+	//options.InsecureRegistries.Set("0.0.0.0/0")
+	//registryService := registry.NewService(options)
+	registryService := registry.NewService(nil)
+	//// TODO(runcom): hacky, provide a way of passing tls cert (flag?) to be used to lookup
+	//for _, ic := range registryService.Config.IndexConfigs {
+	//	ic.Secure = false
+	//}
 	endpoints, err := registryService.LookupPullEndpoints(repoInfo)
 	if err != nil {
@@ -127,7 +128,10 @@ func getData(c *cli.Context, ref reference.Named) (*imageInspect, error) {
 		return nil, err
 	}
 	if _, err := v1endpoint.Ping(); err != nil {
-		return nil, err
+		if strings.Contains(err.Error(), "timeout") {
+			return nil, err
+		}
+		continue
 	}
 	if confirmedV2 && endpoint.Version == registry.APIVersion1 {


@@ -36,10 +36,6 @@ func (mf *v1ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*i
 		// Allowing fallback, because HTTPS v1 is before HTTP v2
 		return nil, fallbackError{err: registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}}
 	}
-	tag := ""
-	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
-		tag = tagged.Tag()
-	}
 	tlsConfig, err := mf.service.TLSConfig(mf.repoInfo.Index.Name)
 	if err != nil {
 		return nil, err
@@ -62,62 +58,60 @@ func (mf *v1ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*i
 		logrus.Debugf("Fallback from error: %s", err)
 		return nil, fallbackError{err: err}
 	}
-	imgInspect, err = mf.fetchWithSession(ctx, tag)
+	imgInspect, err = mf.fetchWithSession(ctx, ref)
 	if err != nil {
 		return nil, err
 	}
 	return imgInspect, nil
 }
-func (mf *v1ManifestFetcher) fetchWithSession(ctx context.Context, askedTag string) (*imageInspect, error) {
+func (mf *v1ManifestFetcher) fetchWithSession(ctx context.Context, ref reference.Named) (*imageInspect, error) {
 	repoData, err := mf.session.GetRepositoryData(mf.repoInfo)
 	if err != nil {
 		if strings.Contains(err.Error(), "HTTP code: 404") {
-			return nil, fmt.Errorf("Error: image %s not found", mf.repoInfo.RemoteName)
+			return nil, fmt.Errorf("Error: image %s not found", mf.repoInfo.RemoteName())
 		}
 		// Unexpected HTTP error
 		return nil, err
 	}
-	logrus.Debugf("Retrieving the tag list from V1 endpoints")
-	tagsList, err := mf.session.GetRemoteTags(repoData.Endpoints, mf.repoInfo)
+	var tagsList map[string]string
+	tagsList, err = mf.session.GetRemoteTags(repoData.Endpoints, mf.repoInfo)
 	if err != nil {
-		logrus.Errorf("Unable to get remote tags: %s", err)
+		logrus.Errorf("unable to get remote tags: %s", err)
 		return nil, err
 	}
-	if len(tagsList) < 1 {
-		return nil, fmt.Errorf("No tags available for remote repository %s", mf.repoInfo.FullName())
-	}
+	logrus.Debugf("Retrieving the tag list")
+	tagged, isTagged := ref.(reference.NamedTagged)
+	var tagID, tag string
+	if isTagged {
+		tag = tagged.Tag()
+		tagsList[tagged.Tag()] = tagID
+	} else {
+		ref, err = reference.WithTag(ref, reference.DefaultTag)
+		if err != nil {
+			return nil, err
+		}
+		tagged, _ := ref.(reference.NamedTagged)
+		tag = tagged.Tag()
+		tagsList[tagged.Tag()] = tagID
+	}
+	tagID, err = mf.session.GetRemoteTag(repoData.Endpoints, mf.repoInfo, tag)
+	if err == registry.ErrRepoNotFound {
+		return nil, fmt.Errorf("Tag %s not found in repository %s", tag, mf.repoInfo.FullName())
+	}
+	if err != nil {
+		logrus.Errorf("unable to get remote tags: %s", err)
+		return nil, err
+	}
 	tagList := []string{}
-	for tag, id := range tagsList {
+	for tag, _ := range tagsList {
 		tagList = append(tagList, tag)
-		repoData.ImgList[id] = &registry.ImgData{
-			ID:       id,
-			Tag:      tag,
-			Checksum: "",
-		}
 	}
-	// If no tag has been specified, choose `latest` if it exists
-	if askedTag == "" {
-		if _, exists := tagsList[reference.DefaultTag]; exists {
-			askedTag = reference.DefaultTag
-		}
-	}
-	if askedTag == "" {
-		// fallback to any tag in the repository
-		for tag := range tagsList {
-			askedTag = tag
-			break
-		}
-	}
-	id, exists := tagsList[askedTag]
-	if !exists {
-		return nil, fmt.Errorf("Tag %s not found in repository %s", askedTag, mf.repoInfo.FullName())
-	}
-	img := repoData.ImgList[id]
+	img := repoData.ImgList[tagID]
 	var pulledImg *image.Image
 	for _, ep := range mf.repoInfo.Index.Mirrors {
@@ -142,10 +136,10 @@ func (mf *v1ManifestFetcher) fetchWithSession(ctx context.Context, askedTag stri
 		return nil, fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, mf.repoInfo.FullName(), err)
 	}
 	if pulledImg == nil {
-		return nil, fmt.Errorf("No such image %s:%s", mf.repoInfo.FullName(), askedTag)
+		return nil, fmt.Errorf("No such image %s:%s", mf.repoInfo.FullName(), tag)
 	}
-	return makeImageInspect(pulledImg, askedTag, "", tagList), nil
+	return makeImageInspect(pulledImg, tag, "", tagList), nil
 }
 func (mf *v1ManifestFetcher) pullImageJSON(imgID, endpoint string, token []string) (*image.Image, error) {


@@ -63,8 +63,6 @@ func (mf *v2ManifestFetcher) fetchWithRepository(ctx context.Context, ref refere
 		manifest    distribution.Manifest
 		tagOrDigest string // Used for logging/progress only
 		tagList     = []string{}
-		tag         string
 	)
 	manSvc, err := mf.repo.Manifests(ctx)
@@ -72,55 +70,47 @@ func (mf *v2ManifestFetcher) fetchWithRepository(ctx context.Context, ref refere
 		return nil, err
 	}
-	tagList, err = mf.repo.Tags(ctx).All(ctx)
-	if err != nil {
-		return nil, allowV1Fallback(err)
+	if _, isTagged := ref.(reference.NamedTagged); !isTagged {
+		ref, err = reference.WithTag(ref, reference.DefaultTag)
+		if err != nil {
+			return nil, err
+		}
 	}
-	// The v2 registry knows about this repository, so we will not
-	// allow fallback to the v1 protocol even if we encounter an
-	// error later on.
-	mf.confirmedV2 = true
-	if digested, isDigested := ref.(reference.Canonical); isDigested {
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		// NOTE: not using TagService.Get, since it uses HEAD requests
+		// against the manifests endpoint, which are not supported by
+		// all registry versions.
+		manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
+		if err != nil {
+			return nil, allowV1Fallback(err)
+		}
+		tagOrDigest = tagged.Tag()
+	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
 		manifest, err = manSvc.Get(ctx, digested.Digest())
 		if err != nil {
 			return nil, err
 		}
 		tagOrDigest = digested.Digest().String()
 	} else {
-		if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
-			tagOrDigest = tagged.Tag()
-			tag = tagOrDigest
-		} else {
-			for _, t := range tagList {
-				if t == reference.DefaultTag {
-					tag = t
-				}
-			}
-			if tag == "" && len(tagList) > 0 {
-				tag = tagList[0]
-			}
-			if tag == "" {
-				return nil, fmt.Errorf("No tags available for remote repository %s", mf.repoInfo.FullName())
-			}
-		}
-		// NOTE: not using TagService.Get, since it uses HEAD requests
-		// against the manifests endpoint, which are not supported by
-		// all registry versions.
-		manifest, err = manSvc.Get(ctx, "", client.WithTag(tag))
-		if err != nil {
-			return nil, allowV1Fallback(err)
-		}
-		// If manSvc.Get succeeded, we can be confident that the registry on
-		// the other side speaks the v2 protocol.
-		mf.confirmedV2 = true
+		return nil, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
 	}
 	if manifest == nil {
 		return nil, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
 	}
+	// If manSvc.Get succeeded, we can be confident that the registry on
+	// the other side speaks the v2 protocol.
+	mf.confirmedV2 = true
+	tagList, err = mf.repo.Tags(ctx).All(ctx)
+	if err != nil {
+		// If this repository doesn't exist on V2, we should
+		// permit a fallback to V1.
+		return nil, allowV1Fallback(err)
+	}
 	var (
 		image          *image.Image
 		manifestDigest digest.Digest
@@ -154,7 +144,7 @@ func (mf *v2ManifestFetcher) fetchWithRepository(ctx context.Context, ref refere
 	//	ref = reference.WithDefaultTag(ref)
 	//}
 	//_ = showTags
-	return makeImageInspect(image, tag, manifestDigest, tagList), nil
+	return makeImageInspect(image, tagOrDigest, manifestDigest, tagList), nil
 }
 func (mf *v2ManifestFetcher) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (img *image.Image, manifestDigest digest.Digest, err error) {