Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 22:08:44 +00:00)

Compare commits: 14 commits

8094910c9a
82b121caf1
89631ab4f1
4d7ac1999e
370f1bc685
51c104bde0
286ab4e144
34fed9cabc
01b3c23ffa
7fc29e2323
b03817412b
03b19aa069
69b84ce650
044e9b5390
Dockerfile (36 changed lines)
@@ -1,14 +1,21 @@
FROM fedora

RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man golang-github-Sirupsen-logrus-devel golang-github-codegangsta-cli-devel golang-golangorg-net-devel
RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man \
# registry v1 deps
xz-devel \
python-devel \
python-pip \
swig \
redhat-rpm-config \
openssl-devel \
patch

# Install two versions of the registry. The first is an older version that
# Install three versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
# and schema2 manifests. Install registry v1 also.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
#ENV REGISTRY_COMMIT_v1 TODO(runcom)
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
@@ -18,14 +25,21 @@ RUN set -x \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
&& rm -rf "$GOPATH" \
&& export DRV1="$(mktemp -d)" \
&& git clone https://github.com/docker/docker-registry.git "$DRV1" \
# no need for setuptools since we have a version conflict with fedora
&& sed -i.bak s/setuptools==5.8//g "$DRV1/requirements/main.txt" \
&& sed -i.bak s/setuptools==5.8//g "$DRV1/depends/docker-registry-core/requirements/main.txt" \
&& pip install "$DRV1/depends/docker-registry-core" \
&& pip install file://"$DRV1#egg=docker-registry[bugsnag,newrelic,cors]" \
&& patch $(python -c 'import boto; import os; print os.path.dirname(boto.__file__)')/connection.py \
< "$DRV1/contrib/boto_header_patch.diff" \
&& dnf -y update && dnf install -y m2crypto

ENV GOPATH /usr/share/gocode:/go
WORKDIR /go/src/github.com/runcom/skopeo
WORKDIR /go/src/github.com/projectatomic/skopeo

COPY . /go/src/github.com/projectatomic/skopeo

#ENTRYPOINT ["hack/dind"]

COPY . /go/src/github.com/runcom/skopeo

# remove distro-supplied dependencies, so we build against them and the rest from vendor/
RUN rm -rf /go/src/github.com/runcom/skopeo/vendor/golang.org && rm -rf /go/src/github.com/runcom/skopeo/vendor/github.com/Sirupsen && rm -rf /go/src/github.com/runcom/skopeo/vendor/github.com/codegangsta
README.md (21 changed lines)
@@ -1,11 +1,11 @@
skopeo [](https://travis-ci.org/runcom/skopeo)
skopeo [](https://travis-ci.org/projectatomic/skopeo)
=

_Please be aware `skopeo` is still work in progress_

`skopeo` is a command line utility which is able to _inspect_ a repository on a Docker registry.
By _inspect_ I mean it fetches the repository's manifest and it is able to show you a `docker inspect`-like
json output about a whole repository or a tag. This tool, in constrast to `docker inspect`, helps you gather useful information about
json output about a whole repository or a tag. This tool, in contrast to `docker inspect`, helps you gather useful information about
a repository or a tag before pulling it (using disk space) - e.g. - which tags are available for the given repository? which labels the image has?

Examples:
@@ -79,9 +79,11 @@ Building
-
To build `skopeo` you need at least Go 1.5 because it uses the latest `GO15VENDOREXPERIMENT` flag. Also, make sure to clone the repository in your `GOPATH` - otherwise compilation fails.
```sh
$ cd $GOPATH/src/github.com # make sure you have github.com folder otherwise just create it
$ git clone https://github.com/runcom/skopeo
$ cd runcom && make binary
$ cd $GOPATH/src
$ mkdir -p github.com/projectatomic
$ cd projectatomic
$ git clone https://github.com/projectatomic/skopeo
$ cd skopeo && make binary
```
Man:
-
@@ -91,9 +93,14 @@ $ make man
```
Installing
-
If you built from source:
```sh
$ sudo make install
```
`skopeo` is also available from Fedora 23:
```sh
sudo dnf install skopeo
```
Tests
-
_You need Docker installed on your system in order to run the test suite_
@@ -102,6 +109,10 @@ $ make test-integration
```
TODO
-
- make skopeo docker registry v2 only
- output raw manifest
- download layers and support docker load tar(s)
- get rid of docker/docker code (?)
- show repo tags via flag or when reference isn't tagged or digested
- add tests (integration with deployed registries in container - Docker-like)
- support rkt/appc image spec
@@ -4,14 +4,20 @@ import (
"encoding/json"
"fmt"
"strings"
"syscall"
"time"

"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/docker/distribution/digest"
distreference "github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/docker/docker/api"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/distribution"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/image"
"github.com/docker/docker/opts"
versionPkg "github.com/docker/docker/pkg/version"
@@ -19,7 +25,7 @@ import (
"github.com/docker/docker/registry"
engineTypes "github.com/docker/engine-api/types"
registryTypes "github.com/docker/engine-api/types/registry"
"github.com/runcom/skopeo/types"
"github.com/projectatomic/skopeo/types"
"golang.org/x/net/context"
)

@@ -32,6 +38,7 @@ type fallbackError struct {
// supports the v2 protocol. This is used to limit fallbacks to the v1
// protocol.
confirmedV2 bool
transportOK bool
}

// Error renders the FallbackError as a string.
@@ -84,7 +91,7 @@ func GetData(c *cli.Context, name string) (*types.ImageInspect, error) {
ic.Secure = false
}

endpoints, err := registryService.LookupPullEndpoints(repoInfo)
endpoints, err := registryService.LookupPullEndpoints(repoInfo.Hostname())
if err != nil {
return nil, err
}
@@ -96,11 +103,12 @@ func GetData(c *cli.Context, name string) (*types.ImageInspect, error) {
discardNoSupportErrors bool
imgInspect *types.ImageInspect
confirmedV2 bool
confirmedTLSRegistries = make(map[string]struct{})
)

for _, endpoint := range endpoints {
// make sure I can reach the registry, same as docker pull does
v1endpoint, err := endpoint.ToV1Endpoint(nil)
v1endpoint, err := endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(), nil)
if err != nil {
return nil, err
}
@@ -115,6 +123,14 @@ func GetData(c *cli.Context, name string) (*types.ImageInspect, error) {
logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
continue
}

if endpoint.URL.Scheme != "https" {
if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
continue
}
}

logrus.Debugf("Trying to fetch image manifest of %s repository from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)

//fetcher, err := newManifestFetcher(endpoint, repoInfo, config)
@@ -133,11 +149,14 @@ func GetData(c *cli.Context, name string) (*types.ImageInspect, error) {
if fallbackErr, ok := err.(fallbackError); ok {
fallback = true
confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
}
err = fallbackErr.err
}
}
if fallback {
if _, ok := err.(registry.ErrNoSupport); !ok {
if _, ok := err.(distribution.ErrNoSupport); !ok {
// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
discardNoSupportErrors = true
// save the current error
@@ -149,7 +168,7 @@ func GetData(c *cli.Context, name string) (*types.ImageInspect, error) {
}
continue
}
logrus.Debugf("Not continuing with error: %v", err)
logrus.Errorf("Not continuing with pull after error: %v", err)
return nil, err
}

@@ -290,3 +309,37 @@ func rawJSON(value interface{}) *json.RawMessage {
}
return (*json.RawMessage)(&jsonval)
}

func continueOnError(err error) bool {
switch v := err.(type) {
case errcode.Errors:
if len(v) == 0 {
return true
}
return continueOnError(v[0])
case distribution.ErrNoSupport:
return continueOnError(v.Err)
case errcode.Error:
return shouldV2Fallback(v)
case *client.UnexpectedHTTPResponseError:
return true
case ImageConfigPullError:
return false
case error:
return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error()))
}
// let's be nice and fallback if the error is a completely
// unexpected one.
// If new errors have to be handled in some way, please
// add them to the switch above.
return true
}

// shouldV2Fallback returns true if this error is a reason to fall back to v1.
func shouldV2Fallback(err errcode.Error) bool {
switch err.Code {
case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
return true
}
return false
}
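The continueOnError and shouldV2Fallback helpers added above decide whether an error from a v2 endpoint should make the inspect code fall back to the v1 protocol. A minimal, self-contained sketch of that decision chain follows; aggregateError, noSupportError and configPullError are invented stand-ins for the vendored errcode.Errors, distribution.ErrNoSupport and ImageConfigPullError types, so this illustrates the pattern rather than the vendored implementation.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"syscall"
)

// Local stand-ins for the vendored error types inspected by continueOnError.
type aggregateError []error                   // plays the role of errcode.Errors
type noSupportError struct{ Err error }       // plays the role of distribution.ErrNoSupport
type configPullError struct{ Err error }      // plays the role of ImageConfigPullError

func (e aggregateError) Error() string  { return fmt.Sprintf("%d errors", len(e)) }
func (e noSupportError) Error() string  { return "operation not supported: " + e.Err.Error() }
func (e configPullError) Error() string { return "error pulling image configuration: " + e.Err.Error() }

// continueOnError mirrors the decision chain above: unwrap aggregates and
// "no support" wrappers, refuse to fall back on config-pull failures or on
// out-of-disk-space errors, and fall back for anything unexpected.
func continueOnError(err error) bool {
	switch v := err.(type) {
	case aggregateError:
		if len(v) == 0 {
			return true
		}
		return continueOnError(v[0])
	case noSupportError:
		return continueOnError(v.Err)
	case configPullError:
		return false
	case error:
		return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error()))
	}
	// Unexpected error types are treated as retryable.
	return true
}

func main() {
	fmt.Println(continueOnError(noSupportError{Err: errors.New("schema2 not supported")})) // true
	fmt.Println(continueOnError(configPullError{Err: errors.New("blob unknown")}))         // false
	fmt.Println(continueOnError(errors.New("no space left on device")))                    // false
}
```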
@@ -9,12 +9,14 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/registry/client/transport"
dockerdistribution "github.com/docker/docker/distribution"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/image"
"github.com/docker/docker/image/v1"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
engineTypes "github.com/docker/engine-api/types"
"github.com/runcom/skopeo/types"
"github.com/projectatomic/skopeo/types"
"golang.org/x/net/context"
)

@@ -35,7 +37,7 @@ func (mf *v1ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*t
)
if _, isCanonical := ref.(reference.Canonical); isCanonical {
// Allowing fallback, because HTTPS v1 is before HTTP v2
return nil, fallbackError{err: registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}}
return nil, fallbackError{err: dockerdistribution.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}}
}
tlsConfig, err := mf.service.TLSConfig(mf.repoInfo.Index.Name)
if err != nil {
@@ -45,11 +47,11 @@ func (mf *v1ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*t
tr := transport.NewTransport(
registry.NewTransport(tlsConfig),
//registry.DockerHeaders(mf.config.MetaHeaders)...,
registry.DockerHeaders(nil)...,
registry.DockerHeaders(dockerversion.DockerUserAgent(), nil)...,
)
client := registry.HTTPClient(tr)
//v1Endpoint, err := mf.endpoint.ToV1Endpoint(mf.config.MetaHeaders)
v1Endpoint, err := mf.endpoint.ToV1Endpoint(nil)
v1Endpoint, err := mf.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(), nil)
if err != nil {
logrus.Debugf("Could not get v1 endpoint: %v", err)
return nil, fallbackError{err: err}
@@ -20,7 +20,7 @@ import (
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
engineTypes "github.com/docker/engine-api/types"
"github.com/runcom/skopeo/types"
"github.com/projectatomic/skopeo/types"
"golang.org/x/net/context"
)

@@ -44,7 +44,7 @@ func (mf *v2ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*t
mf.repo, mf.confirmedV2, err = dockerdistribution.NewV2Repository(ctx, mf.repoInfo, mf.endpoint, nil, &mf.authConfig, "pull")
if err != nil {
logrus.Debugf("Error getting v2 registry: %v", err)
return nil, fallbackError{err: err, confirmedV2: mf.confirmedV2}
return nil, err
}

imgInspect, err = mf.fetchWithRepository(ctx, ref)
@@ -52,8 +52,9 @@ func (mf *v2ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) (*t
if _, ok := err.(fallbackError); ok {
return nil, err
}
if registry.ContinueOnError(err) {
err = fallbackError{err: err, confirmedV2: mf.confirmedV2}
if continueOnError(err) {
logrus.Errorf("Error trying v2 registry: %v", err)
return nil, fallbackError{err: err, confirmedV2: mf.confirmedV2, transportOK: true}
}
}
return imgInspect, err
@@ -294,7 +295,7 @@ func (mf *v2ManifestFetcher) pullSchema2(ctx context.Context, ref reference.Name
go func() {
configJSON, err := mf.pullSchema2ImageConfig(ctx, target.Digest)
if err != nil {
errChan <- err
errChan <- ImageConfigPullError{Err: err}
cancel()
return
}
@@ -369,6 +370,17 @@ func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, imag
}
}

// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {
Err error
}

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
return "error pulling image configuration: " + e.Err.Error()
}

// allowV1Fallback checks if the error is a possible reason to fallback to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
@@ -377,13 +389,13 @@ func allowV1Fallback(err error) error {
switch v := err.(type) {
case errcode.Errors:
if len(v) != 0 {
if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) {
return fallbackError{err: err, confirmedV2: false}
if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
return fallbackError{err: err, confirmedV2: false, transportOK: true}
}
}
case errcode.Error:
if registry.ShouldV2Fallback(v) {
return fallbackError{err: err, confirmedV2: false}
if shouldV2Fallback(v) {
return fallbackError{err: err, confirmedV2: false, transportOK: true}
}
}
return err
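The pullSchema2 change above wraps a failed config download in ImageConfigPullError and cancels the shared context so the rest of the pull stops. Below is a hedged, self-contained sketch of that pattern; fetchConfig and the digest string are hypothetical stand-ins, not the real pullSchema2ImageConfig.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// configPullError wraps a failure to fetch the image configuration so the
// caller can tell it apart from other pull errors (as ImageConfigPullError
// does above) and avoid falling back to the v1 protocol for it.
type configPullError struct{ Err error }

func (e configPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
}

// fetchConfig is a hypothetical stand-in for pullSchema2ImageConfig.
func fetchConfig(ctx context.Context, digest string) ([]byte, error) {
	return nil, errors.New("blob unknown to registry")
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)

	// Fetch the config blob concurrently; on failure, wrap the error and
	// cancel everything else that shares this context.
	go func() {
		cfg, err := fetchConfig(ctx, "sha256:deadbeef")
		if err != nil {
			errChan <- configPullError{Err: err}
			cancel()
			return
		}
		configChan <- cfg
	}()

	select {
	case cfg := <-configChan:
		fmt.Printf("got %d bytes of image config\n", len(cfg))
	case err := <-errChan:
		fmt.Println("pull failed:", err)
	}
}
```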
@@ -19,7 +19,7 @@ set -e

set -o pipefail

export SKOPEO_PKG='github.com/runcom/skopeo'
export SKOPEO_PKG='github.com/projectatomic/skopeo'
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export MAKEDIR="$SCRIPTDIR/make"
@@ -7,15 +7,15 @@ source 'hack/.vendor-helpers.sh'

clone git github.com/codegangsta/cli v1.2.0
clone git github.com/Sirupsen/logrus v0.8.7
clone git github.com/vbatts/tar-split master
clone git github.com/vbatts/tar-split v0.9.11
clone git github.com/gorilla/mux master
clone git github.com/gorilla/context master
clone git golang.org/x/net master https://github.com/golang/net.git
clone git github.com/go-check/check v1

clone git github.com/docker/docker v1.10.2
clone git github.com/docker/engine-api v0.2.3
clone git github.com/docker/distribution 0f2d99b13ae0cfbcf118eff103e6e680b726b47e
clone git github.com/docker/docker 29bade2cd0a09191279f04ebc6aeedaa70c772a0
clone git github.com/docker/engine-api 7f6071353fc48f69d2328c4ebe8f3bd0f7c75da4
clone git github.com/docker/distribution 7b66c50bb7e0e4b3b83f8fd134a9f6ea4be08b57

clone git github.com/docker/go-connections master
clone git github.com/docker/go-units master
@@ -5,8 +5,8 @@ import (
"strings"

"github.com/codegangsta/cli"
"github.com/runcom/skopeo/docker"
"github.com/runcom/skopeo/types"
"github.com/projectatomic/skopeo/docker"
"github.com/projectatomic/skopeo/types"
)

type imgKind int
@@ -31,7 +31,7 @@ type SkopeoSuite struct {
regV1 *testRegistryV1
regV2 *testRegistryV2
regV2Shema1 *testRegistryV2
regV1WithAuth *testRegistryV1
regV1WithAuth *testRegistryV1 // does v1 support auth?
regV2WithAuth *testRegistryV2
}

@@ -47,7 +47,7 @@ func (s *SkopeoSuite) SetUpTest(c *check.C) {
_, err := exec.LookPath(skopeoBinary)
c.Assert(err, check.IsNil)

s.regV1 = setupRegistryV1At(c, privateRegistryURL0, false) // not used
s.regV1 = setupRegistryV1At(c, privateRegistryURL0, false) // TODO:(runcom)
s.regV2 = setupRegistryV2At(c, privateRegistryURL1, false, false)
s.regV2Shema1 = setupRegistryV2At(c, privateRegistryURL2, false, true)
s.regV1WithAuth = setupRegistryV1At(c, privateRegistryURL3, true) // not used
@@ -13,15 +13,21 @@ import (
)

const (
binaryV1 = "docker-registry"
binaryV2 = "registry-v2"
binaryV2Schema1 = "registry-v2-schema1"
)

type testRegistryV1 struct {
cmd *exec.Cmd
url string
dir string
}

func setupRegistryV1At(c *check.C, url string, auth bool) *testRegistryV1 {
return &testRegistryV1{}
return &testRegistryV1{
url: url,
}
}

type testRegistryV2 struct {
main.go (2 changed lines)
@@ -11,7 +11,7 @@ import (
)

const (
version = "0.1.9"
version = "0.1.10-dev"
usage = "inspect images on a registry"
)
vendor/github.com/docker/distribution/.mailmap (generated, vendored; 3 changed lines)
@@ -2,6 +2,7 @@ Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.gith
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
@@ -11,4 +12,4 @@ Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
vendor/github.com/docker/distribution/AUTHORS (generated, vendored; 9 changed lines)
@@ -21,6 +21,7 @@ Ben Firshman <ben@firshman.co.uk>
bin liu <liubin0329@gmail.com>
Brian Bland <brian.bland@docker.com>
burnettk <burnettk@gmail.com>
Carson A <ca@carsonoid.net>
Chris Dillon <squarism@gmail.com>
Daisuke Fujita <dtanshi45@gmail.com>
Darren Shepherd <darren@rancher.com>
@@ -33,11 +34,13 @@ davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
farmerworking <farmerworking@gmail.com>
Florentin Raud <florentin.raud@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
gabriell nascimento <gabriell@bluesoft.com.br>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
@@ -55,7 +58,9 @@ Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Li Yi <denverdino@gmail.com>
Liu Hua <sdu.liu@huawei.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Mary Anthony <mary@docker.com>
@@ -76,7 +81,9 @@ Olivier Jacques <olivier.jacques@hp.com>
Patrick Devine <patrick.devine@docker.com>
Philip Misiowiec <philip@atlashealth.com>
Richard Scothern <richard.scothern@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Rusty Conover <rusty@luckydinosaur.com>
Sean Boran <Boran@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
@@ -93,11 +100,13 @@ Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Trevor Pounds <trevor.pounds@gmail.com>
Troels Thomsen <troels@thomsen.io>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
vendor/github.com/docker/distribution/MAINTAINERS (generated, vendored; 5 changed lines)
@@ -32,6 +32,11 @@
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"

[people.brianbland]
Name = "Brian Bland"
Email = "brian.bland@docker.com"
GitHub = "BrianBland"

[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
vendor/github.com/docker/distribution/context/doc.go (generated, vendored; 2 changed lines)
@@ -1,6 +1,6 @@
// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevent request information but this package is not limited to
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
vendor/github.com/docker/distribution/context/trace.go (generated, vendored; 2 changed lines)
@@ -10,7 +10,7 @@ import (
// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapased time and a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
vendor/github.com/docker/distribution/digest/set.go (generated, vendored; 2 changed lines)
@@ -22,7 +22,7 @@ var (
// may be easily referenced by easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are ommited from this set,
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
vendor/github.com/docker/distribution/manifest/schema1/config_builder.go (generated, vendored; 21 changed lines)
@@ -9,6 +9,7 @@ import (

"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"

"github.com/docker/distribution/digest"
@@ -39,10 +40,8 @@ type configManifestBuilder struct {
// configJSON is configuration supplied when the ManifestBuilder was
// created.
configJSON []byte
// name is the name provided to NewConfigManifestBuilder
name string
// tag is the tag provided to NewConfigManifestBuilder
tag string
// ref contains the name and optional tag provided to NewConfigManifestBuilder.
ref reference.Named
// descriptors is the set of descriptors referencing the layers.
descriptors []distribution.Descriptor
// emptyTarDigest is set to a valid digest if an empty tar has been
@@ -54,13 +53,12 @@ type configManifestBuilder struct {
// schema version from an image configuration and a set of descriptors.
// It takes a BlobService so that it can add an empty tar to the blob store
// if the resulting manifest needs empty layers.
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, name, tag string, configJSON []byte) distribution.ManifestBuilder {
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
return &configManifestBuilder{
bs: bs,
pk: pk,
configJSON: configJSON,
name: name,
tag: tag,
ref: ref,
}
}

@@ -190,12 +188,17 @@ func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Mani

history[0].V1Compatibility = string(transformedConfig)

tag := ""
if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}

mfst := Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: mb.name,
Tag: mb.tag,
Name: mb.ref.Name(),
Tag: tag,
Architecture: img.Architecture,
FSLayers: fsLayerList,
History: history,
vendor/github.com/docker/distribution/manifest/schema1/manifest.go (generated, vendored; 2 changed lines)
@@ -102,7 +102,7 @@ type SignedManifest struct {
Canonical []byte `json:"-"`

// all contains the byte representation of the Manifest including signatures
// and is retuend by Payload()
// and is returned by Payload()
all []byte
}
vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go (generated, vendored; 10 changed lines)
@@ -8,6 +8,7 @@ import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
)

@@ -20,13 +21,18 @@ type referenceManifestBuilder struct {

// NewReferenceManifestBuilder is used to build new manifests for the current
// schema version using schema1 dependencies.
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, name, tag, architecture string) distribution.ManifestBuilder {
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
tag := ""
if tagged, isTagged := ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}

return &referenceManifestBuilder{
Manifest: Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: name,
Name: ref.Name(),
Tag: tag,
Architecture: architecture,
},
vendor/github.com/docker/distribution/manifests.go (generated, vendored; 2 changed lines)
@@ -81,7 +81,7 @@ type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)

var mappings = make(map[string]UnmarshalFunc, 0)

// UnmarshalManifest looks up manifest unmarshall functions based on
// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of
vendor/github.com/docker/distribution/reference/regexp.go (generated, vendored; 2 changed lines)
@@ -49,7 +49,7 @@ var (

// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the hostname and name part omitting
// the seperating forward slash from either.
// the separating forward slash from either.
NameRegexp = expression(
optional(hostnameRegexp, literal(`/`)),
nameComponentRegexp,
vendor/github.com/docker/distribution/registry.go (generated, vendored; 7 changed lines)
@@ -2,6 +2,7 @@ package distribution

import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
)

// Scope defines the set of items that match a namespace.
@@ -32,7 +33,7 @@ type Namespace interface {
// Repository should return a reference to the named repository. The
// registry may or may not have the repository but should always return a
// reference.
Repository(ctx context.Context, name string) (Repository, error)
Repository(ctx context.Context, name reference.Named) (Repository, error)

// Repositories fills 'repos' with a lexigraphically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries
@@ -48,8 +49,8 @@ type ManifestServiceOption interface {

// Repository is a named collection of manifests and layers.
type Repository interface {
// Name returns the name of the repository.
Name() string
// Named returns the name of the repository.
Named() reference.Named

// Manifests returns a reference to this repository's manifest service.
// with the supplied options applied.
vendor/github.com/docker/distribution/registry/api/v2/descriptors.go (generated, vendored; 6 changed lines)
@@ -271,7 +271,7 @@ type MethodDescriptor struct {
// RequestDescriptor per API use case.
type RequestDescriptor struct {
// Name provides a short identifier for the request, usable as a title or
// to provide quick context for the particalar request.
// to provide quick context for the particular request.
Name string

// Description should cover the requests purpose, covering any details for
@@ -303,14 +303,14 @@
// ResponseDescriptor describes the components of an API response.
type ResponseDescriptor struct {
// Name provides a short identifier for the response, usable as a title or
// to provide quick context for the particalar response.
// to provide quick context for the particular response.
Name string

// Description should provide a brief overview of the role of the
// response.
Description string

// StatusCode specifies the status recieved by this particular response.
// StatusCode specifies the status received by this particular response.
StatusCode int

// Headers covers any headers that may be returned from the response.
vendor/github.com/docker/distribution/registry/api/v2/errors.go (generated, vendored; 2 changed lines)
@@ -84,7 +84,7 @@ var (
})

// ErrorCodeManifestUnverified is returned when the manifest fails
// signature verfication.
// signature verification.
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_UNVERIFIED",
Message: "manifest failed signature verification",
vendor/github.com/docker/distribution/registry/api/v2/urls.go (generated, vendored; 30 changed lines)
@@ -5,7 +5,7 @@ import (
"net/url"
"strings"

"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/gorilla/mux"
)

@@ -113,10 +113,10 @@ func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
}

// BuildTagsURL constructs a url to list the tags in the named repository.
func (ub *URLBuilder) BuildTagsURL(name string) (string, error) {
func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameTags)

tagsURL, err := route.URL("name", name)
tagsURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
@@ -126,10 +126,18 @@ func (ub *URLBuilder) BuildTagsURL(name string) (string, error) {

// BuildManifestURL constructs a url for the manifest identified by name and
// reference. The argument reference may be either a tag or digest.
func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) {
func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameManifest)

manifestURL, err := route.URL("name", name, "reference", reference)
tagOrDigest := ""
switch v := ref.(type) {
case reference.Tagged:
tagOrDigest = v.Tag()
case reference.Digested:
tagOrDigest = v.Digest().String()
}

manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
if err != nil {
return "", err
}
@@ -138,10 +146,10 @@ func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) {
}

// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) {
func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
route := ub.cloneRoute(RouteNameBlob)

layerURL, err := route.URL("name", name, "digest", dgst.String())
layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
if err != nil {
return "", err
}
@@ -151,10 +159,10 @@ func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, err

// BuildBlobUploadURL constructs a url to begin a blob upload in the
// repository identified by name.
func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) {
func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUpload)

uploadURL, err := route.URL("name", name)
uploadURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
@@ -166,10 +174,10 @@ func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (str
// including any url values. This should generally not be used by clients, as
// this url is provided by server implementations during the blob upload
// process.
func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) {
func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUploadChunk)

uploadURL, err := route.URL("name", name, "uuid", uuid)
uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
if err != nil {
return "", err
}
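The URL-builder changes above replace raw name/tag/digest strings with a reference value that carries both pieces, and a type switch picks whichever of tag or digest is present. The sketch below reproduces that idea with small local interfaces (named, tagged, digested) standing in for the docker/distribution reference types; the references themselves are made up, and only the well-known /v2/<name>/manifests/<reference> path shape is assumed.

```go
package main

import "fmt"

// Local stand-ins for the reference.Named / Tagged / Digested interfaces that
// the URL builders above now accept instead of raw strings.
type named interface{ Name() string }
type tagged interface {
	named
	Tag() string
}
type digested interface {
	named
	Digest() string
}

type taggedRef struct{ name, tag string }

func (r taggedRef) Name() string { return r.name }
func (r taggedRef) Tag() string  { return r.tag }

type digestRef struct{ name, digest string }

func (r digestRef) Name() string   { return r.name }
func (r digestRef) Digest() string { return r.digest }

// manifestPath mirrors the BuildManifestURL change: the reference carries both
// the repository name and either a tag or a digest, so callers can no longer
// pass a mismatched (name, reference) pair.
func manifestPath(ref named) string {
	tagOrDigest := ""
	switch v := ref.(type) {
	case tagged:
		tagOrDigest = v.Tag()
	case digested:
		tagOrDigest = v.Digest()
	}
	return fmt.Sprintf("/v2/%s/manifests/%s", ref.Name(), tagOrDigest)
}

func main() {
	fmt.Println(manifestPath(taggedRef{name: "projectatomic/skopeo", tag: "latest"}))
	fmt.Println(manifestPath(digestRef{name: "projectatomic/skopeo", digest: "sha256:0123abcd"}))
}
```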
vendor/github.com/docker/distribution/registry/client/auth/session.go (generated, vendored; 8 changed lines)
@@ -15,6 +15,10 @@ import (
"github.com/docker/distribution/registry/client/transport"
)

// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
// basic auth due to lack of credentials.
var ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")

// AuthenticationHandler is an interface for authorizing a request from
// params from a "WWW-Authenicate" header for a single scheme.
type AuthenticationHandler interface {
@@ -285,9 +289,9 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon
}

if tr.ExpiresIn < minimumTokenLifetimeSeconds {
logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
// The default/minimum lifetime.
tr.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
}

if tr.IssuedAt.IsZero() {
@@ -322,5 +326,5 @@ func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]st
return nil
}
}
return errors.New("no basic auth credentials")
return ErrNoBasicAuthCredentials
}
vendor/github.com/docker/distribution/registry/client/repository.go (generated, vendored; 161 changed lines)
@@ -27,6 +27,39 @@ type Registry interface {
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}

// checkHTTPRedirect is a callback that can manipulate redirected HTTP
// requests. It is used to preserve Accept and Range headers.
func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}

if len(via) > 0 {
for headerName, headerVals := range via[0].Header {
if headerName != "Accept" && headerName != "Range" {
continue
}
for _, val := range headerVals {
// Don't add to redirected request if redirected
// request already has a header with the same
// name and value.
hasValue := false
for _, existingVal := range req.Header[headerName] {
if existingVal == val {
hasValue = true
break
}
}
if !hasValue {
req.Header.Add(headerName, val)
}
}
}
}

return nil
}

// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
ub, err := v2.NewURLBuilderFromString(baseURL)
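checkHTTPRedirect above re-adds the Accept and Range headers to redirected requests, since the Go HTTP client of that era did not carry request headers across redirects and registries commonly redirect blob GETs to object storage. Below is a small, self-contained sketch of the same idea wired into an http.Client; the registry URL is hypothetical.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// preserveHeadersOnRedirect copies Accept and Range from the original request
// onto the redirected one, skipping values that are already present (newer
// net/http versions may have copied some headers on their own).
func preserveHeadersOnRedirect(req *http.Request, via []*http.Request) error {
	if len(via) >= 10 {
		return errors.New("stopped after 10 redirects")
	}
	if len(via) == 0 {
		return nil
	}
	for _, name := range []string{"Accept", "Range"} {
		for _, val := range via[0].Header[name] {
			exists := false
			for _, existing := range req.Header[name] {
				if existing == val {
					exists = true
					break
				}
			}
			if !exists {
				req.Header.Add(name, val)
			}
		}
	}
	return nil
}

func main() {
	client := &http.Client{CheckRedirect: preserveHeadersOnRedirect}
	// Hypothetical blob URL; a registry would typically redirect this to a CDN.
	req, err := http.NewRequest("GET", "https://registry.example.com/v2/foo/blobs/sha256:0123abcd", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=0-1023")
	req.Header.Set("Accept", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```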
@@ -35,8 +68,9 @@ func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTrippe
}

client := &http.Client{
Transport: transport,
Timeout: 1 * time.Minute,
Transport: transport,
Timeout: 1 * time.Minute,
CheckRedirect: checkHTTPRedirect,
}

return &registry{
@@ -98,18 +132,15 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
}

// NewRepository creates a new Repository for the given repository name and base URL.
func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
if _, err := reference.ParseNamed(name); err != nil {
return nil, err
}

func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
ub, err := v2.NewURLBuilderFromString(baseURL)
if err != nil {
return nil, err
}

client := &http.Client{
Transport: transport,
Transport: transport,
CheckRedirect: checkHTTPRedirect,
// TODO(dmcgowan): create cookie jar
}

@@ -125,21 +156,21 @@ type repository struct {
client *http.Client
ub *v2.URLBuilder
context context.Context
name string
name reference.Named
}

func (r *repository) Name() string {
func (r *repository) Named() reference.Named {
return r.name
}

func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
statter := &blobStatter{
name: r.Name(),
name: r.name,
ub: r.ub,
client: r.client,
}
return &blobs{
name: r.Name(),
name: r.name,
ub: r.ub,
client: r.client,
statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
@@ -149,7 +180,7 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
// todo(richardscothern): options should be sent over the wire
return &manifests{
name: r.Name(),
name: r.name,
ub: r.ub,
client: r.client,
etags: make(map[string]string),
@@ -161,7 +192,7 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService {
client: r.client,
ub: r.ub,
context: r.context,
name: r.Name(),
name: r.Named(),
}
}

@@ -170,7 +201,7 @@ type tags struct {
client *http.Client
ub *v2.URLBuilder
context context.Context
name string
name reference.Named
}

// All returns all tags
@@ -253,7 +284,11 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e
// to construct a descriptor for the tag. If the registry doesn't support HEADing
// a manifest, fallback to GET.
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
u, err := t.ub.BuildManifestURL(t.name, tag)
ref, err := reference.WithTag(t.name, tag)
if err != nil {
return distribution.Descriptor{}, err
}
u, err := t.ub.BuildManifestURL(ref)
if err != nil {
return distribution.Descriptor{}, err
}
@@ -293,14 +328,18 @@ func (t *tags) Untag(ctx context.Context, tag string) error {
}

type manifests struct {
name string
name reference.Named
ub *v2.URLBuilder
client *http.Client
etags map[string]string
}

func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return false, err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return false, err
}
@@ -337,11 +376,19 @@ func (o etagOption) Apply(ms distribution.ManifestService) error {
}

func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
var (
digestOrTag string
ref reference.Named
err error
)

var tag string
for _, option := range options {
if opt, ok := option.(withTagOption); ok {
tag = opt.tag
digestOrTag = opt.tag
ref, err = reference.WithTag(ms.name, opt.tag)
if err != nil {
return nil, err
}
} else {
err := option.Apply(ms)
if err != nil {
@@ -350,14 +397,15 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
}
}

var ref string
if tag != "" {
ref = tag
} else {
ref = dgst.String()
if digestOrTag == "" {
digestOrTag = dgst.String()
ref, err = reference.WithDigest(ms.name, dgst)
if err != nil {
return nil, err
}
}

u, err := ms.ub.BuildManifestURL(ms.name, ref)
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return nil, err
}
@@ -371,8 +419,8 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
req.Header.Add("Accept", t)
}

if _, ok := ms.etags[ref]; ok {
req.Header.Set("If-None-Match", ms.etags[ref])
if _, ok := ms.etags[digestOrTag]; ok {
req.Header.Set("If-None-Match", ms.etags[digestOrTag])
}

resp, err := ms.client.Do(req)
@@ -414,13 +462,19 @@ func (o withTagOption) Apply(m distribution.ManifestService) error {
}

// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
// tag name in order to build the correct upload URL. This state is written and read under a lock.
// tag name in order to build the correct upload URL.
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
var tag string
ref := ms.name
var tagged bool

for _, option := range options {
if opt, ok := option.(withTagOption); ok {
tag = opt.tag
var err error
ref, err = reference.WithTag(ref, opt.tag)
if err != nil {
return "", err
}
tagged = true
} else {
err := option.Apply(ms)
if err != nil {
@@ -428,13 +482,24 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options .
}
}
}

manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag)
mediaType, p, err := m.Payload()
if err != nil {
return "", err
}

mediaType, p, err := m.Payload()
if !tagged {
// generate a canonical digest and Put by digest
_, d, err := distribution.UnmarshalManifest(mediaType, p)
if err != nil {
return "", err
}
ref, err = reference.WithDigest(ref, d.Digest)
if err != nil {
return "", err
}
}

manifestURL, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return "", err
}
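The Put change above means an untagged manifest is pushed by its content address: a digest computed over the canonical manifest bytes. The sketch below only illustrates that digest convention (sha256 over the payload, rendered as sha256:<hex>); the manifest JSON and repository name are made up, and the real code derives the digest via distribution.UnmarshalManifest rather than hashing directly.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// canonicalDigest computes a content address for a manifest payload in the
// "sha256:<hex>" form used by docker/distribution.
func canonicalDigest(payload []byte) string {
	sum := sha256.Sum256(payload)
	return fmt.Sprintf("sha256:%x", sum)
}

func main() {
	manifest := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json"}`)
	dgst := canonicalDigest(manifest)
	// An untagged put targets /v2/<name>/manifests/<digest> instead of a tag.
	fmt.Printf("PUT /v2/projectatomic/skopeo/manifests/%s\n", dgst)
}
```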
@@ -466,7 +531,11 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options .
}

func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return err
}
@@ -493,7 +562,7 @@ func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
}*/

type blobs struct {
name string
name reference.Named
ub *v2.URLBuilder
client *http.Client

@@ -531,7 +600,11 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
}

func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return nil, err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return nil, err
}
@@ -666,13 +739,17 @@ func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
}

type blobStatter struct {
name string
name reference.Named
ub *v2.URLBuilder
client *http.Client
}

func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
u, err := bs.ub.BuildBlobURL(bs.name, dgst)
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
u, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return distribution.Descriptor{}, err
}
@@ -720,7 +797,11 @@ func buildCatalogValues(maxEntries int, last string) url.Values {
}

func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return err
}
97
vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
generated
vendored
97
vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
generated
vendored
@@ -1,12 +1,22 @@
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
|
||||
contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`)
|
||||
|
||||
// ErrWrongCodeForByteRange is returned if the client sends a request
|
||||
// with a Range header but the server returns a 2xx or 3xx code other
|
||||
// than 206 Partial Content.
|
||||
ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
|
||||
)
|
||||
|
||||
// ReadSeekCloser combines io.ReadSeeker with io.Closer.
|
||||
@@ -40,8 +50,6 @@ type httpReadSeeker struct {
|
||||
|
||||
// rc is the remote read closer.
|
||||
rc io.ReadCloser
|
||||
// brd is a buffer for internal buffered io.
|
||||
brd *bufio.Reader
|
||||
// readerOffset tracks the offset as of the last read.
|
||||
readerOffset int64
|
||||
// seekOffset allows Seek to override the offset. Seek changes
|
||||
@@ -79,11 +87,6 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
|
||||
hrs.seekOffset += int64(n)
|
||||
hrs.readerOffset += int64(n)
|
||||
|
||||
// Simulate io.EOF error if we reach filesize.
|
||||
if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size {
|
||||
err = io.EOF
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
@@ -92,8 +95,18 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
return 0, hrs.err
|
||||
}
|
||||
|
||||
lastReaderOffset := hrs.readerOffset
|
||||
|
||||
if whence == os.SEEK_SET && hrs.rc == nil {
|
||||
// If no request has been made yet, and we are seeking to an
|
||||
// absolute position, set the read offset as well to avoid an
|
||||
// unnecessary request.
|
||||
hrs.readerOffset = offset
|
||||
}
|
||||
|
||||
_, err := hrs.reader()
|
||||
if err != nil {
|
||||
hrs.readerOffset = lastReaderOffset
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -101,14 +114,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
|
||||
switch whence {
|
||||
case os.SEEK_CUR:
|
||||
newOffset += int64(offset)
|
||||
newOffset += offset
|
||||
case os.SEEK_END:
|
||||
if hrs.size < 0 {
|
||||
return 0, errors.New("content length not known")
|
||||
}
|
||||
newOffset = hrs.size + int64(offset)
|
||||
newOffset = hrs.size + offset
|
||||
case os.SEEK_SET:
|
||||
newOffset = int64(offset)
|
||||
newOffset = offset
|
||||
}
|
||||
|
||||
if newOffset < 0 {
|
||||
@@ -131,7 +144,6 @@ func (hrs *httpReadSeeker) Close() error {
|
||||
}
|
||||
|
||||
hrs.rc = nil
|
||||
hrs.brd = nil
|
||||
|
||||
hrs.err = errors.New("httpLayer: closed")
|
||||
|
||||
@@ -154,7 +166,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
|
||||
}
|
||||
|
||||
if hrs.rc != nil {
|
||||
return hrs.brd, nil
|
||||
return hrs.rc, nil
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", hrs.url, nil)
|
||||
@@ -163,10 +175,8 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
|
||||
}
|
||||
|
||||
if hrs.readerOffset > 0 {
|
||||
// TODO(stevvooe): Get this working correctly.
|
||||
|
||||
// If we are at different offset, issue a range request from there.
|
||||
req.Header.Add("Range", "1-")
|
||||
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
|
||||
// TODO: get context in here
|
||||
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
|
||||
}
|
||||
@@ -179,12 +189,55 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
|
||||
// Normally would use client.SuccessStatus, but that would be a cyclic
|
||||
// import
|
||||
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
|
||||
hrs.rc = resp.Body
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
if hrs.readerOffset > 0 {
|
||||
if resp.StatusCode != http.StatusPartialContent {
|
||||
return nil, ErrWrongCodeForByteRange
|
||||
}
|
||||
|
||||
contentRange := resp.Header.Get("Content-Range")
|
||||
if contentRange == "" {
|
||||
return nil, errors.New("no Content-Range header found in HTTP 206 response")
|
||||
}
|
||||
|
||||
submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
|
||||
if len(submatches) < 4 {
|
||||
return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
|
||||
}
|
||||
|
||||
startByte, err := strconv.ParseUint(submatches[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
|
||||
}
|
||||
|
||||
if startByte != uint64(hrs.readerOffset) {
|
||||
return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
|
||||
}
|
||||
|
||||
endByte, err := strconv.ParseUint(submatches[2], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
|
||||
}
|
||||
|
||||
if submatches[3] == "*" {
|
||||
hrs.size = -1
|
||||
} else {
|
||||
size, err := strconv.ParseUint(submatches[3], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
|
||||
}
|
||||
|
||||
if endByte+1 != size {
|
||||
return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
|
||||
}
|
||||
|
||||
hrs.size = int64(size)
|
||||
}
|
||||
} else if resp.StatusCode == http.StatusOK {
|
||||
hrs.size = resp.ContentLength
|
||||
} else {
|
||||
hrs.size = -1
|
||||
}
|
||||
hrs.rc = resp.Body
|
||||
} else {
|
||||
defer resp.Body.Close()
|
||||
if hrs.errorHandler != nil {
|
||||
@@ -193,11 +246,5 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
|
||||
return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
|
||||
}
|
||||
|
||||
if hrs.brd == nil {
|
||||
hrs.brd = bufio.NewReader(hrs.rc)
|
||||
} else {
|
||||
hrs.brd.Reset(hrs.rc)
|
||||
}
|
||||
|
||||
return hrs.brd, nil
|
||||
return hrs.rc, nil
|
||||
}
|
||||
|
||||
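Note: the hunk above resumes a read by validating the 206 response's Content-Range header before trusting it. A minimal standalone sketch of that check follows; the regexp, function name and offsets are assumptions for illustration, not the vendored contentRangeRegexp code.

package main

import (
    "fmt"
    "regexp"
    "strconv"
)

var contentRange = regexp.MustCompile(`bytes (\d+)-(\d+)/(\d+|\*)`)

// checkContentRange verifies that a 206 response resumes at the offset we
// asked for, and reports the total size when the server knows it (-1 for "*").
func checkContentRange(header string, wantOffset int64) (int64, error) {
    m := contentRange.FindStringSubmatch(header)
    if len(m) < 4 {
        return 0, fmt.Errorf("could not parse Content-Range header: %s", header)
    }
    start, err := strconv.ParseInt(m[1], 10, 64)
    if err != nil || start != wantOffset {
        return 0, fmt.Errorf("range starts at %s, requested %d", m[1], wantOffset)
    }
    if m[3] == "*" {
        return -1, nil // total size unknown
    }
    return strconv.ParseInt(m[3], 10, 64)
}

func main() {
    size, err := checkContentRange("bytes 100-499/500", 100)
    fmt.Println(size, err) // 500 <nil>
}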
5
vendor/github.com/docker/docker/api/common.go
generated
vendored
@@ -18,14 +18,11 @@ import (
|
||||
// Common constants for daemon and client.
|
||||
const (
|
||||
// Version of Current REST API
|
||||
DefaultVersion version.Version = "1.22"
|
||||
DefaultVersion version.Version = "1.23"
|
||||
|
||||
// MinVersion represents Minimum REST API version supported
|
||||
MinVersion version.Version = "1.12"
|
||||
|
||||
// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
|
||||
DefaultDockerfileName string = "Dockerfile"
|
||||
|
||||
// NoBaseImageSpecifier is the symbol used by the FROM
|
||||
// command to specify that no base image is to be used.
|
||||
NoBaseImageSpecifier string = "scratch"
|
||||
|
||||
36
vendor/github.com/docker/docker/cliconfig/config.go
generated
vendored
@@ -17,6 +17,7 @@ import (
|
||||
const (
|
||||
// ConfigFileName is the name of config file
|
||||
ConfigFileName = "config.json"
|
||||
configFileDir = ".docker"
|
||||
oldConfigfile = ".dockercfg"
|
||||
|
||||
// This constant is only used for really old config files when the
|
||||
@@ -31,7 +32,7 @@ var (
|
||||
|
||||
func init() {
|
||||
if configDir == "" {
|
||||
configDir = filepath.Join(homedir.Get(), ".docker")
|
||||
configDir = filepath.Join(homedir.Get(), configFileDir)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,12 +48,13 @@ func SetConfigDir(dir string) {
|
||||
|
||||
// ConfigFile ~/.docker/config.json file info
|
||||
type ConfigFile struct {
|
||||
AuthConfigs map[string]types.AuthConfig `json:"auths"`
|
||||
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
||||
PsFormat string `json:"psFormat,omitempty"`
|
||||
ImagesFormat string `json:"imagesFormat,omitempty"`
|
||||
DetachKeys string `json:"detachKeys,omitempty"`
|
||||
filename string // Note: not serialized - for internal use only
|
||||
AuthConfigs map[string]types.AuthConfig `json:"auths"`
|
||||
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
||||
PsFormat string `json:"psFormat,omitempty"`
|
||||
ImagesFormat string `json:"imagesFormat,omitempty"`
|
||||
DetachKeys string `json:"detachKeys,omitempty"`
|
||||
CredentialsStore string `json:"credsStore,omitempty"`
|
||||
filename string // Note: not serialized - for internal use only
|
||||
}
|
||||
|
||||
// NewConfigFile initializes an empty configuration file for the given filename 'fn'
|
||||
@@ -86,11 +88,6 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
origEmail := strings.Split(arr[1], " = ")
|
||||
if len(origEmail) != 2 {
|
||||
return fmt.Errorf("Invalid Auth config file")
|
||||
}
|
||||
authConfig.Email = origEmail[1]
|
||||
authConfig.ServerAddress = defaultIndexserver
|
||||
configFile.AuthConfigs[defaultIndexserver] = authConfig
|
||||
} else {
|
||||
@@ -126,6 +123,13 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ContainsAuth returns whether there is authentication configured
|
||||
// in this file or not.
|
||||
func (configFile *ConfigFile) ContainsAuth() bool {
|
||||
return configFile.CredentialsStore != "" ||
|
||||
(configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0)
|
||||
}
|
||||
|
||||
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
||||
// a non-nested reader
|
||||
func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) {
|
||||
@@ -249,6 +253,10 @@ func (configFile *ConfigFile) Filename() string {
|
||||
|
||||
// encodeAuth creates a base64 encoded string to containing authorization information
|
||||
func encodeAuth(authConfig *types.AuthConfig) string {
|
||||
if authConfig.Username == "" && authConfig.Password == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
authStr := authConfig.Username + ":" + authConfig.Password
|
||||
msg := []byte(authStr)
|
||||
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
|
||||
@@ -258,6 +266,10 @@ func encodeAuth(authConfig *types.AuthConfig) string {
|
||||
|
||||
// decodeAuth decodes a base64 encoded string and returns username and password
|
||||
func decodeAuth(authStr string) (string, string, error) {
|
||||
if authStr == "" {
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
decLen := base64.StdEncoding.DecodedLen(len(authStr))
|
||||
decoded := make([]byte, decLen)
|
||||
authByte := []byte(authStr)
|
||||
|
||||
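Note: encodeAuth/decodeAuth above store credentials as base64-encoded "username:password". A standalone sketch of that round trip, with illustrative function names and a made-up user/password, not the vendored cliconfig code:

package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

// encode packs "user:password" the way the config file stores it.
func encode(user, pass string) string {
    return base64.StdEncoding.EncodeToString([]byte(user + ":" + pass))
}

// decode reverses encode; it fails if the payload is not "user:password".
func decode(auth string) (string, string, error) {
    raw, err := base64.StdEncoding.DecodeString(auth)
    if err != nil {
        return "", "", err
    }
    parts := strings.SplitN(string(raw), ":", 2)
    if len(parts) != 2 {
        return "", "", fmt.Errorf("invalid auth configuration entry")
    }
    return parts[0], parts[1], nil
}

func main() {
    enc := encode("alice", "secret")
    user, pass, _ := decode(enc)
    fmt.Println(enc, user, pass)
}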
18
vendor/github.com/docker/docker/daemon/graphdriver/driver.go
generated
vendored
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/vbatts/tar-split/tar/storage"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
@@ -92,6 +93,23 @@ type Driver interface {
|
||||
DiffSize(id, parent string) (size int64, err error)
|
||||
}
|
||||
|
||||
// DiffGetterDriver is the interface for layered file system drivers that
|
||||
// provide a specialized function for getting file contents for tar-split.
|
||||
type DiffGetterDriver interface {
|
||||
Driver
|
||||
// DiffGetter returns an interface to efficiently retrieve the contents
|
||||
// of files in a layer.
|
||||
DiffGetter(id string) (FileGetCloser, error)
|
||||
}
|
||||
|
||||
// FileGetCloser extends the storage.FileGetter interface with a Close method
|
||||
// for cleaning up.
|
||||
type FileGetCloser interface {
|
||||
storage.FileGetter
|
||||
// Close cleans up any resources associated with the FileGetCloser.
|
||||
Close() error
|
||||
}
|
||||
|
||||
func init() {
|
||||
drivers = make(map[string]InitFunc)
|
||||
}
|
||||
|
||||
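Note: the DiffGetterDriver/FileGetCloser interfaces added above let a driver hand out individual files from a layer plus a cleanup hook. A self-contained sketch of that shape follows; the interface here is local to the example (not the tar-split storage.FileGetter), and the file names are made up.

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"
)

// fileGetCloser mirrors the shape of FileGetCloser: a per-file getter plus
// a Close for cleaning up whatever resources back it.
type fileGetCloser interface {
    Get(path string) (io.ReadCloser, error)
    Close() error
}

// dirGetter serves files straight from a layer directory.
type dirGetter struct{ root string }

func (g dirGetter) Get(path string) (io.ReadCloser, error) {
    return os.Open(filepath.Join(g.root, filepath.Clean("/"+path)))
}

func (g dirGetter) Close() error { return nil }

func main() {
    dir, _ := ioutil.TempDir("", "layer")
    defer os.RemoveAll(dir)
    ioutil.WriteFile(filepath.Join(dir, "etc-hosts"), []byte("127.0.0.1 localhost\n"), 0644)

    var fg fileGetCloser = dirGetter{root: dir}
    rc, err := fg.Get("etc-hosts")
    if err != nil {
        panic(err)
    }
    defer rc.Close()
    data, _ := ioutil.ReadAll(rc)
    fmt.Println(strings.TrimSpace(string(data)))
    fg.Close()
}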
106
vendor/github.com/docker/docker/distribution/errors.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
package distribution
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/docker/distribution/xfer"
|
||||
)
|
||||
|
||||
// ErrNoSupport is an error type used for errors indicating that an operation
|
||||
// is not supported. It encapsulates a more specific error.
|
||||
type ErrNoSupport struct{ Err error }
|
||||
|
||||
func (e ErrNoSupport) Error() string {
|
||||
if e.Err == nil {
|
||||
return "not supported"
|
||||
}
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// fallbackError wraps an error that can possibly allow fallback to a different
|
||||
// endpoint.
|
||||
type fallbackError struct {
|
||||
// err is the error being wrapped.
|
||||
err error
|
||||
// confirmedV2 is set to true if it was confirmed that the registry
|
||||
// supports the v2 protocol. This is used to limit fallbacks to the v1
|
||||
// protocol.
|
||||
confirmedV2 bool
|
||||
// transportOK is set to true if we managed to speak HTTP with the
|
||||
// registry. This confirms that we're using appropriate TLS settings
|
||||
// (or lack of TLS).
|
||||
transportOK bool
|
||||
}
|
||||
|
||||
// Error renders the FallbackError as a string.
|
||||
func (f fallbackError) Error() string {
|
||||
return f.err.Error()
|
||||
}
|
||||
|
||||
// shouldV2Fallback returns true if this error is a reason to fall back to v1.
|
||||
func shouldV2Fallback(err errcode.Error) bool {
|
||||
switch err.Code {
|
||||
case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// continueOnError returns true if we should fallback to the next endpoint
|
||||
// as a result of this error.
|
||||
func continueOnError(err error) bool {
|
||||
switch v := err.(type) {
|
||||
case errcode.Errors:
|
||||
if len(v) == 0 {
|
||||
return true
|
||||
}
|
||||
return continueOnError(v[0])
|
||||
case ErrNoSupport:
|
||||
return continueOnError(v.Err)
|
||||
case errcode.Error:
|
||||
return shouldV2Fallback(v)
|
||||
case *client.UnexpectedHTTPResponseError:
|
||||
return true
|
||||
case ImageConfigPullError:
|
||||
return false
|
||||
case error:
|
||||
return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error()))
|
||||
}
|
||||
// let's be nice and fallback if the error is a completely
|
||||
// unexpected one.
|
||||
// If new errors have to be handled in some way, please
|
||||
// add them to the switch above.
|
||||
return true
|
||||
}
|
||||
|
||||
// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the
|
||||
// operation after this error.
|
||||
func retryOnError(err error) error {
|
||||
switch v := err.(type) {
|
||||
case errcode.Errors:
|
||||
return retryOnError(v[0])
|
||||
case errcode.Error:
|
||||
switch v.Code {
|
||||
case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied:
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
case *url.Error:
|
||||
return retryOnError(v.Err)
|
||||
case *client.UnexpectedHTTPResponseError:
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
case error:
|
||||
if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
}
|
||||
// let's be nice and fallback if the error is a completely
|
||||
// unexpected one.
|
||||
// If new errors have to be handled in some way, please
|
||||
// add them to the switch above.
|
||||
return err
|
||||
}
|
||||
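Note: retryOnError above decides which failures get wrapped in xfer.DoNotRetry so the transfer loop stops instead of retrying. A simplified standalone sketch of that pattern follows; the string-based classification and the doNotRetry type stand in for the real errcode checks and xfer.DoNotRetry, and the error values are made up.

package main

import (
    "errors"
    "fmt"
)

// doNotRetry plays the role of xfer.DoNotRetry: it wraps an error to tell
// the download/upload loop to give up instead of retrying.
type doNotRetry struct{ Err error }

func (e doNotRetry) Error() string { return e.Err.Error() }

// classify mimics the shape of retryOnError: some failures are permanent,
// everything else is handed back for another attempt.
func classify(err error) error {
    switch err.Error() {
    case "unauthorized", "denied", "unsupported":
        return doNotRetry{Err: err}
    }
    return err
}

func main() {
    const maxRetries = 3
    attempt := func() error { return errors.New("unauthorized") }

    for retries := 0; retries < maxRetries; retries++ {
        err := classify(attempt())
        if err == nil {
            break
        }
        if _, fatal := err.(doNotRetry); fatal {
            fmt.Println("giving up:", err)
            break
        }
        fmt.Println("retrying after:", err)
    }
}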
38
vendor/github.com/docker/docker/distribution/pull.go
generated
vendored
@@ -2,7 +2,6 @@ package distribution
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api"
|
||||
@@ -19,7 +18,6 @@ import (
|
||||
// ImagePullConfig stores pull configuration.
|
||||
type ImagePullConfig struct {
|
||||
// MetaHeaders stores HTTP headers with metadata about the image
|
||||
// (DockerHeaders with prefix X-Meta- in the request).
|
||||
MetaHeaders map[string][]string
|
||||
// AuthConfig holds authentication credentials for authenticating with
|
||||
// the registry.
|
||||
@@ -90,7 +88,7 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
|
||||
return err
|
||||
}
|
||||
|
||||
endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo)
|
||||
endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -111,12 +109,25 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
|
||||
// confirm that it was talking to a v2 registry. This will
|
||||
// prevent fallback to the v1 protocol.
|
||||
confirmedV2 bool
|
||||
|
||||
// confirmedTLSRegistries is a map indicating which registries
|
||||
// are known to be using TLS. There should never be a plaintext
|
||||
// retry for any of these.
|
||||
confirmedTLSRegistries = make(map[string]struct{})
|
||||
)
|
||||
for _, endpoint := range endpoints {
|
||||
if confirmedV2 && endpoint.Version == registry.APIVersion1 {
|
||||
logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
|
||||
continue
|
||||
}
|
||||
|
||||
if endpoint.URL.Scheme != "https" {
|
||||
if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
|
||||
logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
|
||||
|
||||
puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
|
||||
@@ -134,11 +145,14 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
|
||||
if fallbackErr, ok := err.(fallbackError); ok {
|
||||
fallback = true
|
||||
confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
|
||||
if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
|
||||
confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
|
||||
}
|
||||
err = fallbackErr.err
|
||||
}
|
||||
}
|
||||
if fallback {
|
||||
if _, ok := err.(registry.ErrNoSupport); !ok {
|
||||
if _, ok := err.(ErrNoSupport); !ok {
|
||||
// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
|
||||
discardNoSupportErrors = true
|
||||
// append subsequent errors
|
||||
@@ -149,9 +163,10 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
|
||||
// append subsequent errors
|
||||
lastErr = err
|
||||
}
|
||||
logrus.Errorf("Attempting next endpoint for pull after error: %v", err)
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Not continuing with error: %v", err)
|
||||
logrus.Errorf("Not continuing with pull after error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -188,16 +203,3 @@ func validateRepoName(name string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// tmpFileClose creates a closer function for a temporary file that closes the file
|
||||
// and also deletes it.
|
||||
func tmpFileCloser(tmpFile *os.File) func() error {
|
||||
return func() error {
|
||||
tmpFile.Close()
|
||||
if err := os.RemoveAll(tmpFile.Name()); err != nil {
|
||||
logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
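Note: the Pull loop above now remembers two facts across endpoints: a confirmed v2 registry blocks later v1 attempts, and a host that answered over TLS is never retried in plaintext (the new confirmedTLSRegistries map). A standalone sketch of that skip logic follows; the endpoint type, the hostname registry.example.com and the simulated attempt outcome are all placeholders.

package main

import "fmt"

// endpoint is a cut-down stand-in for registry.APIEndpoint: just a scheme,
// a host and an API version.
type endpoint struct {
    scheme, host string
    version      int
}

func main() {
    endpoints := []endpoint{
        {"https", "registry.example.com:5000", 2},
        {"http", "registry.example.com:5000", 2},
        {"https", "registry.example.com:5000", 1},
    }

    confirmedV2 := false
    confirmedTLS := map[string]struct{}{}

    for _, ep := range endpoints {
        // Once a v2 registry answered, never fall back to v1.
        if confirmedV2 && ep.version == 1 {
            fmt.Println("skipping v1 endpoint for", ep.host)
            continue
        }
        // Once a host answered over TLS, never retry it in plaintext.
        if ep.scheme != "https" {
            if _, ok := confirmedTLS[ep.host]; ok {
                fmt.Println("skipping non-TLS endpoint for", ep.host)
                continue
            }
        }

        // Simulate a pull attempt that reached the registry (transportOK)
        // but still failed, which is what the fallbackError now records.
        attemptConfirmedV2, transportOK := true, true
        confirmedV2 = confirmedV2 || attemptConfirmedV2
        if transportOK && ep.scheme == "https" {
            confirmedTLS[ep.host] = struct{}{}
        }
        fmt.Printf("attempt on %s://%s failed, trying next endpoint\n", ep.scheme, ep.host)
    }
}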
30
vendor/github.com/docker/docker/distribution/pull_v1.go
generated
vendored
@@ -7,6 +7,7 @@ import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/distribution/metadata"
|
||||
"github.com/docker/docker/distribution/xfer"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/image/v1"
|
||||
"github.com/docker/docker/layer"
|
||||
@@ -36,7 +38,7 @@ type v1Puller struct {
|
||||
func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error {
|
||||
if _, isCanonical := ref.(reference.Canonical); isCanonical {
|
||||
// Allowing fallback, because HTTPS v1 is before HTTP v2
|
||||
return fallbackError{err: registry.ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
|
||||
return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
|
||||
}
|
||||
|
||||
tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
|
||||
@@ -47,10 +49,10 @@ func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error {
|
||||
tr := transport.NewTransport(
|
||||
// TODO(tiborvass): was ReceiveTimeout
|
||||
registry.NewTransport(tlsConfig),
|
||||
registry.DockerHeaders(p.config.MetaHeaders)...,
|
||||
registry.DockerHeaders(dockerversion.DockerUserAgent(), p.config.MetaHeaders)...,
|
||||
)
|
||||
client := registry.HTTPClient(tr)
|
||||
v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
|
||||
v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(), p.config.MetaHeaders)
|
||||
if err != nil {
|
||||
logrus.Debugf("Could not get v1 endpoint: %v", err)
|
||||
return fallbackError{err: err}
|
||||
@@ -278,6 +280,7 @@ type v1LayerDescriptor struct {
|
||||
layersDownloaded *bool
|
||||
layerSize int64
|
||||
session *registry.Session
|
||||
tmpFile *os.File
|
||||
}
|
||||
|
||||
func (ld *v1LayerDescriptor) Key() string {
|
||||
@@ -307,7 +310,7 @@ func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progre
|
||||
}
|
||||
*ld.layersDownloaded = true
|
||||
|
||||
tmpFile, err := ioutil.TempFile("", "GetImageBlob")
|
||||
ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob")
|
||||
if err != nil {
|
||||
layerReader.Close()
|
||||
return nil, 0, err
|
||||
@@ -316,17 +319,28 @@ func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progre
|
||||
reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading")
|
||||
defer reader.Close()
|
||||
|
||||
_, err = io.Copy(tmpFile, reader)
|
||||
_, err = io.Copy(ld.tmpFile, reader)
|
||||
if err != nil {
|
||||
ld.Close()
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
progress.Update(progressOutput, ld.ID(), "Download complete")
|
||||
|
||||
logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())
|
||||
logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name())
|
||||
|
||||
tmpFile.Seek(0, 0)
|
||||
return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), ld.layerSize, nil
|
||||
ld.tmpFile.Seek(0, 0)
|
||||
return ld.tmpFile, ld.layerSize, nil
|
||||
}
|
||||
|
||||
func (ld *v1LayerDescriptor) Close() {
|
||||
if ld.tmpFile != nil {
|
||||
ld.tmpFile.Close()
|
||||
if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
|
||||
logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
|
||||
}
|
||||
ld.tmpFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) {
|
||||
|
||||
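Note: the v1LayerDescriptor change above makes the descriptor own its temp file: Download writes into ld.tmpFile and returns it directly, and Close (called later by the download manager) removes it, instead of wrapping the file in a one-shot closer. A standalone sketch of that lifecycle, with a simplified Download signature and made-up layer bytes:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"
)

// layerDescriptor owns its temp file across Download/Close, the way the
// v1LayerDescriptor above now does.
type layerDescriptor struct {
    tmpFile *os.File
}

func (ld *layerDescriptor) Download(src io.Reader) (io.ReadCloser, int64, error) {
    var err error
    ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob")
    if err != nil {
        return nil, 0, err
    }
    n, err := io.Copy(ld.tmpFile, src)
    if err != nil {
        ld.Close()
        return nil, 0, err
    }
    ld.tmpFile.Seek(0, 0)
    return ld.tmpFile, n, nil
}

// Close is called by the download manager once it is done with the reader.
func (ld *layerDescriptor) Close() {
    if ld.tmpFile != nil {
        ld.tmpFile.Close()
        os.RemoveAll(ld.tmpFile.Name())
        ld.tmpFile = nil
    }
}

func main() {
    ld := &layerDescriptor{}
    rc, size, err := ld.Download(strings.NewReader("layer bytes"))
    if err != nil {
        panic(err)
    }
    data, _ := ioutil.ReadAll(rc)
    fmt.Println(size, string(data))
    ld.Close() // removes the temp file
}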
178
vendor/github.com/docker/docker/distribution/pull_v2.go
generated
vendored
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
@@ -17,6 +18,8 @@ import (
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/distribution/registry/client/auth"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/distribution/metadata"
|
||||
"github.com/docker/docker/distribution/xfer"
|
||||
"github.com/docker/docker/image"
|
||||
@@ -32,6 +35,17 @@ import (
|
||||
|
||||
var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
|
||||
|
||||
// ImageConfigPullError is an error pulling the image config blob
|
||||
// (only applies to schema2).
|
||||
type ImageConfigPullError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// Error returns the error string for ImageConfigPullError.
|
||||
func (e ImageConfigPullError) Error() string {
|
||||
return "error pulling image configuration: " + e.Err.Error()
|
||||
}
|
||||
|
||||
type v2Puller struct {
|
||||
V2MetadataService *metadata.V2MetadataService
|
||||
endpoint registry.APIEndpoint
|
||||
@@ -48,16 +62,20 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
|
||||
p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
|
||||
if err != nil {
|
||||
logrus.Warnf("Error getting v2 registry: %v", err)
|
||||
return fallbackError{err: err, confirmedV2: p.confirmedV2}
|
||||
return err
|
||||
}
|
||||
|
||||
if err = p.pullV2Repository(ctx, ref); err != nil {
|
||||
if _, ok := err.(fallbackError); ok {
|
||||
return err
|
||||
}
|
||||
if registry.ContinueOnError(err) {
|
||||
logrus.Debugf("Error trying v2 registry: %v", err)
|
||||
return fallbackError{err: err, confirmedV2: p.confirmedV2}
|
||||
if continueOnError(err) {
|
||||
logrus.Errorf("Error trying v2 registry: %v", err)
|
||||
return fallbackError{
|
||||
err: err,
|
||||
confirmedV2: p.confirmedV2,
|
||||
transportOK: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -114,6 +132,8 @@ type v2LayerDescriptor struct {
|
||||
repoInfo *registry.RepositoryInfo
|
||||
repo distribution.Repository
|
||||
V2MetadataService *metadata.V2MetadataService
|
||||
tmpFile *os.File
|
||||
verifier digest.Verifier
|
||||
}
|
||||
|
||||
func (ld *v2LayerDescriptor) Key() string {
|
||||
@@ -131,17 +151,56 @@ func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
|
||||
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
|
||||
logrus.Debugf("pulling blob %q", ld.digest)
|
||||
|
||||
var (
|
||||
err error
|
||||
offset int64
|
||||
)
|
||||
|
||||
if ld.tmpFile == nil {
|
||||
ld.tmpFile, err = createDownloadFile()
|
||||
if err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
} else {
|
||||
offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
logrus.Debugf("error seeking to end of download file: %v", err)
|
||||
offset = 0
|
||||
|
||||
ld.tmpFile.Close()
|
||||
if err := os.Remove(ld.tmpFile.Name()); err != nil {
|
||||
logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
|
||||
}
|
||||
ld.tmpFile, err = createDownloadFile()
|
||||
if err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
} else if offset != 0 {
|
||||
logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
|
||||
}
|
||||
}
|
||||
|
||||
tmpFile := ld.tmpFile
|
||||
blobs := ld.repo.Blobs(ctx)
|
||||
|
||||
layerDownload, err := blobs.Open(ctx, ld.digest)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error statting layer: %v", err)
|
||||
logrus.Errorf("Error initiating layer download: %v", err)
|
||||
if err == distribution.ErrBlobUnknown {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
return nil, 0, retryOnError(err)
|
||||
}
|
||||
|
||||
if offset != 0 {
|
||||
_, err := layerDownload.Seek(offset, os.SEEK_SET)
|
||||
if err != nil {
|
||||
if err := ld.truncateDownloadFile(); err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
return nil, 0, err
|
||||
}
|
||||
}
|
||||
size, err := layerDownload.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
// Seek failed, perhaps because there was no Content-Length
|
||||
@@ -149,46 +208,59 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
|
||||
// still continue without a progress bar.
|
||||
size = 0
|
||||
} else {
|
||||
// Restore the seek offset at the beginning of the stream.
|
||||
_, err = layerDownload.Seek(0, os.SEEK_SET)
|
||||
if size != 0 && offset > size {
|
||||
logrus.Debugf("Partial download is larger than full blob. Starting over")
|
||||
offset = 0
|
||||
if err := ld.truncateDownloadFile(); err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
}
|
||||
|
||||
// Restore the seek offset either at the beginning of the
|
||||
// stream, or just after the last byte we have from previous
|
||||
// attempts.
|
||||
_, err = layerDownload.Seek(offset, os.SEEK_SET)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading")
|
||||
reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
|
||||
defer reader.Close()
|
||||
|
||||
verifier, err := digest.NewDigestVerifier(ld.digest)
|
||||
if err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
if ld.verifier == nil {
|
||||
ld.verifier, err = digest.NewDigestVerifier(ld.digest)
|
||||
if err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
}
|
||||
|
||||
tmpFile, err := ioutil.TempFile("", "GetImageBlob")
|
||||
_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
|
||||
if err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
|
||||
_, err = io.Copy(tmpFile, io.TeeReader(reader, verifier))
|
||||
if err != nil {
|
||||
tmpFile.Close()
|
||||
if err := os.Remove(tmpFile.Name()); err != nil {
|
||||
logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
|
||||
if err == transport.ErrWrongCodeForByteRange {
|
||||
if err := ld.truncateDownloadFile(); err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
return nil, 0, err
|
||||
}
|
||||
return nil, 0, retryOnError(err)
|
||||
}
|
||||
|
||||
progress.Update(progressOutput, ld.ID(), "Verifying Checksum")
|
||||
|
||||
if !verifier.Verified() {
|
||||
if !ld.verifier.Verified() {
|
||||
err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
|
||||
logrus.Error(err)
|
||||
|
||||
tmpFile.Close()
|
||||
if err := os.Remove(tmpFile.Name()); err != nil {
|
||||
logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
|
||||
}
|
||||
// Allow a retry if this digest verification error happened
|
||||
// after a resumed download.
|
||||
if offset != 0 {
|
||||
if err := ld.truncateDownloadFile(); err != nil {
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
|
||||
return nil, 0, err
|
||||
}
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
|
||||
@@ -202,9 +274,37 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
|
||||
if err := os.Remove(tmpFile.Name()); err != nil {
|
||||
logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
|
||||
}
|
||||
ld.tmpFile = nil
|
||||
ld.verifier = nil
|
||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil
|
||||
return tmpFile, size, nil
|
||||
}
|
||||
|
||||
func (ld *v2LayerDescriptor) Close() {
|
||||
if ld.tmpFile != nil {
|
||||
ld.tmpFile.Close()
|
||||
if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
|
||||
logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ld *v2LayerDescriptor) truncateDownloadFile() error {
|
||||
// Need a new hash context since we will be redoing the download
|
||||
ld.verifier = nil
|
||||
|
||||
if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil {
|
||||
logrus.Errorf("error seeking to beginning of download file: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ld.tmpFile.Truncate(0); err != nil {
|
||||
logrus.Errorf("error truncating download file: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
|
||||
@@ -250,7 +350,7 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
|
||||
p.confirmedV2 = true
|
||||
|
||||
logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
|
||||
progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())
|
||||
progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())
|
||||
|
||||
var (
|
||||
imageID image.ID
|
||||
@@ -399,7 +499,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
|
||||
go func() {
|
||||
configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
errChan <- ImageConfigPullError{Err: err}
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
@@ -619,12 +719,24 @@ func allowV1Fallback(err error) error {
|
||||
switch v := err.(type) {
|
||||
case errcode.Errors:
|
||||
if len(v) != 0 {
|
||||
if v0, ok := v[0].(errcode.Error); ok && registry.ShouldV2Fallback(v0) {
|
||||
return fallbackError{err: err, confirmedV2: false}
|
||||
if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
|
||||
return fallbackError{
|
||||
err: err,
|
||||
confirmedV2: false,
|
||||
transportOK: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
case errcode.Error:
|
||||
if registry.ShouldV2Fallback(v) {
|
||||
if shouldV2Fallback(v) {
|
||||
return fallbackError{
|
||||
err: err,
|
||||
confirmedV2: false,
|
||||
transportOK: true,
|
||||
}
|
||||
}
|
||||
case *url.Error:
|
||||
if v.Err == auth.ErrNoBasicAuthCredentials {
|
||||
return fallbackError{err: err, confirmedV2: false}
|
||||
}
|
||||
}
|
||||
@@ -711,3 +823,7 @@ func fixManifestLayers(m *schema1.Manifest) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createDownloadFile() (*os.File, error) {
|
||||
return ioutil.TempFile("", "GetImageBlob")
|
||||
}
|
||||
|
||||
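Note: the resumable download above keeps the partial temp file, seeks the remote blob reader to the saved offset, and reuses one long-lived digest verifier so the checksum still covers the whole blob. A standalone sketch of that flow follows, using a bytes.Reader and sha256 in place of the registry blob reader and digest.Verifier; the blob content is made up.

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
    "io"
)

// main sketches the resumable-download flow: keep what is already in the
// partial file, seek the remote reader to that offset, and keep feeding one
// long-lived hash so the final digest check covers the whole blob.
func main() {
    blob := []byte("0123456789abcdef") // pretend blob content
    wantSum := sha256.Sum256(blob)

    partial := &bytes.Buffer{} // stands in for ld.tmpFile
    hash := sha256.New()       // stands in for ld.verifier

    // First attempt: download half of the blob, then "fail".
    remote := bytes.NewReader(blob)
    io.CopyN(partial, io.TeeReader(remote, hash), int64(len(blob)/2))

    // Second attempt: resume from the current offset instead of starting over.
    offset := int64(partial.Len())
    remote = bytes.NewReader(blob)
    remote.Seek(offset, io.SeekStart)
    io.Copy(partial, io.TeeReader(remote, hash))

    fmt.Println("resumed at offset", offset)
    fmt.Println("digest ok:", bytes.Equal(hash.Sum(nil), wantSum[:]))
}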
21
vendor/github.com/docker/docker/distribution/push.go
generated
vendored
@@ -22,7 +22,6 @@ import (
|
||||
// ImagePushConfig stores push configuration.
|
||||
type ImagePushConfig struct {
|
||||
// MetaHeaders store HTTP headers with metadata about the image
|
||||
// (DockerHeaders with prefix X-Meta- in the request).
|
||||
MetaHeaders map[string][]string
|
||||
// AuthConfig holds authentication credentials for authenticating with
|
||||
// the registry.
|
||||
@@ -101,7 +100,7 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo
|
||||
return err
|
||||
}
|
||||
|
||||
endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo)
|
||||
endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -120,6 +119,11 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo
|
||||
// confirm that it was talking to a v2 registry. This will
|
||||
// prevent fallback to the v1 protocol.
|
||||
confirmedV2 bool
|
||||
|
||||
// confirmedTLSRegistries is a map indicating which registries
|
||||
// are known to be using TLS. There should never be a plaintext
|
||||
// retry for any of these.
|
||||
confirmedTLSRegistries = make(map[string]struct{})
|
||||
)
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
@@ -128,6 +132,13 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo
|
||||
continue
|
||||
}
|
||||
|
||||
if endpoint.URL.Scheme != "https" {
|
||||
if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
|
||||
logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version)
|
||||
|
||||
pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig)
|
||||
@@ -143,13 +154,17 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo
|
||||
default:
|
||||
if fallbackErr, ok := err.(fallbackError); ok {
|
||||
confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
|
||||
if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
|
||||
confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
|
||||
}
|
||||
err = fallbackErr.err
|
||||
lastErr = err
|
||||
logrus.Errorf("Attempting next endpoint for push after error: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("Not continuing with error: %v", err)
|
||||
logrus.Errorf("Not continuing with push after error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
6
vendor/github.com/docker/docker/distribution/push_v1.go
generated
vendored
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/distribution/metadata"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/image/v1"
|
||||
"github.com/docker/docker/layer"
|
||||
@@ -20,7 +21,6 @@ import (
|
||||
)
|
||||
|
||||
type v1Pusher struct {
|
||||
ctx context.Context
|
||||
v1IDService *metadata.V1IDService
|
||||
endpoint registry.APIEndpoint
|
||||
ref reference.Named
|
||||
@@ -38,10 +38,10 @@ func (p *v1Pusher) Push(ctx context.Context) error {
|
||||
tr := transport.NewTransport(
|
||||
// TODO(tiborvass): was NoTimeout
|
||||
registry.NewTransport(tlsConfig),
|
||||
registry.DockerHeaders(p.config.MetaHeaders)...,
|
||||
registry.DockerHeaders(dockerversion.DockerUserAgent(), p.config.MetaHeaders)...,
|
||||
)
|
||||
client := registry.HTTPClient(tr)
|
||||
v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
|
||||
v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(), p.config.MetaHeaders)
|
||||
if err != nil {
|
||||
logrus.Debugf("Could not get v1 endpoint: %v", err)
|
||||
return fallbackError{err: err}
|
||||
|
||||
140
vendor/github.com/docker/docker/distribution/push_v2.go
generated
vendored
@@ -42,7 +42,7 @@ type v2Pusher struct {
|
||||
config *ImagePushConfig
|
||||
repo distribution.Repository
|
||||
|
||||
// pushState is state built by the Download functions.
|
||||
// pushState is state built by the Upload functions.
|
||||
pushState pushState
|
||||
}
|
||||
|
||||
@@ -64,12 +64,16 @@ func (p *v2Pusher) Push(ctx context.Context) (err error) {
|
||||
p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
|
||||
if err != nil {
|
||||
logrus.Debugf("Error getting v2 registry: %v", err)
|
||||
return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
|
||||
return err
|
||||
}
|
||||
|
||||
if err = p.pushV2Repository(ctx); err != nil {
|
||||
if registry.ContinueOnError(err) {
|
||||
return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
|
||||
if continueOnError(err) {
|
||||
return fallbackError{
|
||||
err: err,
|
||||
confirmedV2: p.pushState.confirmedV2,
|
||||
transportOK: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -166,7 +170,11 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, ima
|
||||
if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
|
||||
logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)
|
||||
|
||||
builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), ref.Tag(), img.RawJSON())
|
||||
manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON())
|
||||
manifest, err = manifestFromBuilder(ctx, builder, descriptors)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -216,10 +224,11 @@ type v2PushDescriptor struct {
|
||||
repoInfo reference.Named
|
||||
repo distribution.Repository
|
||||
pushState *pushState
|
||||
remoteDescriptor distribution.Descriptor
|
||||
}
|
||||
|
||||
func (pd *v2PushDescriptor) Key() string {
|
||||
return "v2push:" + pd.repo.Name() + " " + pd.layer.DiffID().String()
|
||||
return "v2push:" + pd.repo.Named().Name() + " " + pd.layer.DiffID().String()
|
||||
}
|
||||
|
||||
func (pd *v2PushDescriptor) ID() string {
|
||||
@@ -230,16 +239,16 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID {
|
||||
return pd.layer.DiffID()
|
||||
}
|
||||
|
||||
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
|
||||
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
|
||||
diffID := pd.DiffID()
|
||||
|
||||
pd.pushState.Lock()
|
||||
if _, ok := pd.pushState.remoteLayers[diffID]; ok {
|
||||
if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
|
||||
// it is already known that the push is not needed and
|
||||
// therefore doing a stat is unnecessary
|
||||
pd.pushState.Unlock()
|
||||
progress.Update(progressOutput, pd.ID(), "Layer already exists")
|
||||
return nil
|
||||
return descriptor, nil
|
||||
}
|
||||
pd.pushState.Unlock()
|
||||
|
||||
@@ -249,14 +258,14 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
|
||||
descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
|
||||
if err != nil {
|
||||
progress.Update(progressOutput, pd.ID(), "Image push failed")
|
||||
return retryOnError(err)
|
||||
return distribution.Descriptor{}, retryOnError(err)
|
||||
}
|
||||
if exists {
|
||||
progress.Update(progressOutput, pd.ID(), "Layer already exists")
|
||||
pd.pushState.Lock()
|
||||
pd.pushState.remoteLayers[diffID] = descriptor
|
||||
pd.pushState.Unlock()
|
||||
return nil
|
||||
return descriptor, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -266,27 +275,29 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
|
||||
// then push the blob.
|
||||
bs := pd.repo.Blobs(ctx)
|
||||
|
||||
var mountFrom metadata.V2Metadata
|
||||
var layerUpload distribution.BlobWriter
|
||||
mountAttemptsRemaining := 3
|
||||
|
||||
// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
|
||||
for _, metadata := range v2Metadata {
|
||||
sourceRepo, err := reference.ParseNamed(metadata.SourceRepository)
|
||||
// Attempt to find another repository in the same registry to mount the layer
|
||||
// from to avoid an unnecessary upload.
|
||||
// Note: metadata is stored from oldest to newest, so we iterate through this
|
||||
// slice in reverse to maximize our chances of the blob still existing in the
|
||||
// remote repository.
|
||||
for i := len(v2Metadata) - 1; i >= 0 && mountAttemptsRemaining > 0; i-- {
|
||||
mountFrom := v2Metadata[i]
|
||||
|
||||
sourceRepo, err := reference.ParseNamed(mountFrom.SourceRepository)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if pd.repoInfo.Hostname() == sourceRepo.Hostname() {
|
||||
logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName())
|
||||
mountFrom = metadata
|
||||
break
|
||||
if pd.repoInfo.Hostname() != sourceRepo.Hostname() {
|
||||
// don't mount blobs from another registry
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var createOpts []distribution.BlobCreateOption
|
||||
|
||||
if mountFrom.SourceRepository != "" {
|
||||
namedRef, err := reference.WithName(mountFrom.SourceRepository)
|
||||
if err != nil {
|
||||
return err
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO (brianbland): We need to construct a reference where the Name is
|
||||
@@ -294,51 +305,55 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
|
||||
// richer reference package
|
||||
remoteRef, err := distreference.WithName(namedRef.RemoteName())
|
||||
if err != nil {
|
||||
return err
|
||||
continue
|
||||
}
|
||||
|
||||
canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
continue
|
||||
}
|
||||
|
||||
createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
|
||||
}
|
||||
logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountFrom.Digest, sourceRepo.FullName())
|
||||
|
||||
// Send the layer
|
||||
layerUpload, err := bs.Create(ctx, createOpts...)
|
||||
switch err := err.(type) {
|
||||
case distribution.ErrBlobMounted:
|
||||
progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
|
||||
layerUpload, err = bs.Create(ctx, client.WithMountFrom(canonicalRef))
|
||||
switch err := err.(type) {
|
||||
case distribution.ErrBlobMounted:
|
||||
progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
|
||||
|
||||
err.Descriptor.MediaType = schema2.MediaTypeLayer
|
||||
err.Descriptor.MediaType = schema2.MediaTypeLayer
|
||||
|
||||
pd.pushState.Lock()
|
||||
pd.pushState.confirmedV2 = true
|
||||
pd.pushState.remoteLayers[diffID] = err.Descriptor
|
||||
pd.pushState.Unlock()
|
||||
pd.pushState.Lock()
|
||||
pd.pushState.confirmedV2 = true
|
||||
pd.pushState.remoteLayers[diffID] = err.Descriptor
|
||||
pd.pushState.Unlock()
|
||||
|
||||
// Cache mapping from this layer's DiffID to the blobsum
|
||||
if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
// Cache mapping from this layer's DiffID to the blobsum
|
||||
if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
|
||||
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
return err.Descriptor, nil
|
||||
case nil:
|
||||
// blob upload session created successfully, so begin the upload
|
||||
mountAttemptsRemaining = 0
|
||||
default:
|
||||
// unable to mount layer from this repository, so this source mapping is no longer valid
|
||||
logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
|
||||
pd.v2MetadataService.Remove(mountFrom)
|
||||
mountAttemptsRemaining--
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if mountFrom.SourceRepository != "" {
|
||||
// unable to mount layer from this repository, so this source mapping is no longer valid
|
||||
logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
|
||||
pd.v2MetadataService.Remove(mountFrom)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return retryOnError(err)
|
||||
if layerUpload == nil {
|
||||
layerUpload, err = bs.Create(ctx)
|
||||
if err != nil {
|
||||
return distribution.Descriptor{}, retryOnError(err)
|
||||
}
|
||||
}
|
||||
defer layerUpload.Close()
|
||||
|
||||
arch, err := pd.layer.TarStream()
|
||||
if err != nil {
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
|
||||
// don't care if this fails; best effort
|
||||
@@ -357,12 +372,12 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
|
||||
nn, err := layerUpload.ReadFrom(tee)
|
||||
compressedReader.Close()
|
||||
if err != nil {
|
||||
return retryOnError(err)
|
||||
return distribution.Descriptor{}, retryOnError(err)
|
||||
}
|
||||
|
||||
pushDigest := digester.Digest()
|
||||
if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
|
||||
return retryOnError(err)
|
||||
return distribution.Descriptor{}, retryOnError(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
|
||||
@@ -370,32 +385,33 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
|
||||
|
||||
// Cache mapping from this layer's DiffID to the blobsum
|
||||
if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
|
||||
pd.pushState.Lock()
|
||||
|
||||
// If Commit succeded, that's an indication that the remote registry
|
||||
// If Commit succeeded, that's an indication that the remote registry
|
||||
// speaks the v2 protocol.
|
||||
pd.pushState.confirmedV2 = true
|
||||
|
||||
pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
|
||||
descriptor := distribution.Descriptor{
|
||||
Digest: pushDigest,
|
||||
MediaType: schema2.MediaTypeLayer,
|
||||
Size: nn,
|
||||
}
|
||||
pd.pushState.remoteLayers[diffID] = descriptor
|
||||
|
||||
pd.pushState.Unlock()
|
||||
|
||||
return nil
|
||||
return descriptor, nil
|
||||
}
|
||||
|
||||
func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
|
||||
pd.remoteDescriptor = descriptor
|
||||
}
|
||||
|
||||
func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
|
||||
// Not necessary to lock pushStatus because this is always
|
||||
// called after all the mutation in pushStatus.
|
||||
// By the time this function is called, every layer will have
|
||||
// an entry in remoteLayers.
|
||||
return pd.pushState.remoteLayers[pd.DiffID()]
|
||||
return pd.remoteDescriptor
|
||||
}
|
||||
|
||||
// layerAlreadyExists checks if the registry already know about any of the
|
||||
|
||||
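Note: the rewritten Upload above walks the layer's known source repositories newest-first, skips repositories on other registries, and gives up after a few failed cross-repository mount attempts before falling back to a full upload. A standalone sketch of that decision loop follows; the types, repository names and tryMount helper are placeholders, and in the vendored code a successful mount is signalled by distribution.ErrBlobMounted from bs.Create(ctx, client.WithMountFrom(ref)) rather than a nil error.

package main

import (
    "errors"
    "fmt"
    "strings"
)

// v2Meta is a stand-in for metadata.V2Metadata: where a layer with this
// digest has been pushed before. Values are illustrative.
type v2Meta struct {
    digest     string
    sourceRepo string
}

// tryMount simulates asking the registry to mount the blob from another
// repository: the first candidate is rejected, the second is accepted.
func tryMount(repo string, calls *int) error {
    *calls++
    if *calls == 1 {
        return errors.New("mount rejected")
    }
    return nil
}

func sameRegistry(a, b string) bool {
    return strings.SplitN(a, "/", 2)[0] == strings.SplitN(b, "/", 2)[0]
}

func main() {
    target := "registry.example.com:5000/app/web"
    history := []v2Meta{
        {"sha256:aaa", "registry.example.com:5000/app/base"},  // oldest
        {"sha256:aaa", "other.example.org/app/base"},          // different registry
        {"sha256:aaa", "registry.example.com:5000/team/base"}, // newest
    }

    calls := 0
    attemptsRemaining := 3
    // Newest entries first: the blob is most likely to still exist there.
    for i := len(history) - 1; i >= 0 && attemptsRemaining > 0; i-- {
        from := history[i]
        if !sameRegistry(target, from.sourceRepo) {
            continue // never mount across registries
        }
        if err := tryMount(from.sourceRepo, &calls); err != nil {
            fmt.Println("mount from", from.sourceRepo, "failed:", err)
            attemptsRemaining--
            continue
        }
        fmt.Println("mounted", from.digest, "from", from.sourceRepo)
        return
    }
    fmt.Println("no mount possible, falling back to a full upload")
}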
115
vendor/github.com/docker/docker/distribution/registry.go
generated
vendored
@@ -5,37 +5,19 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
distreference "github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/distribution/registry/client/auth"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/distribution/xfer"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/engine-api/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// fallbackError wraps an error that can possibly allow fallback to a different
|
||||
// endpoint.
|
||||
type fallbackError struct {
|
||||
// err is the error being wrapped.
|
||||
err error
|
||||
// confirmedV2 is set to true if it was confirmed that the registry
|
||||
// supports the v2 protocol. This is used to limit fallbacks to the v1
|
||||
// protocol.
|
||||
confirmedV2 bool
|
||||
}
|
||||
|
||||
// Error renders the FallbackError as a string.
|
||||
func (f fallbackError) Error() string {
|
||||
return f.err.Error()
|
||||
}
|
||||
|
||||
type dumbCredentialStore struct {
|
||||
auth *types.AuthConfig
|
||||
}
|
||||
@@ -68,43 +50,21 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end
|
||||
DisableKeepAlives: true,
|
||||
}
|
||||
|
||||
modifiers := registry.DockerHeaders(metaHeaders)
|
||||
modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(), metaHeaders)
|
||||
authTransport := transport.NewTransport(base, modifiers...)
|
||||
pingClient := &http.Client{
|
||||
Transport: authTransport,
|
||||
Timeout: 15 * time.Second,
|
||||
}
|
||||
endpointStr := strings.TrimRight(endpoint.URL, "/") + "/v2/"
|
||||
req, err := http.NewRequest("GET", endpointStr, nil)
|
||||
|
||||
challengeManager, foundVersion, err := registry.PingV2Registry(endpoint, authTransport)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
resp, err := pingClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
v2Version := auth.APIVersion{
|
||||
Type: "registry",
|
||||
Version: "2.0",
|
||||
}
|
||||
|
||||
versions := auth.APIVersions(resp, registry.DefaultRegistryVersionHeader)
|
||||
for _, pingVersion := range versions {
|
||||
if pingVersion == v2Version {
|
||||
// The version header indicates we're definitely
|
||||
// talking to a v2 registry. So don't allow future
|
||||
// fallbacks to the v1 protocol.
|
||||
|
||||
foundVersion = true
|
||||
break
|
||||
transportOK := false
|
||||
if responseErr, ok := err.(registry.PingResponseError); ok {
|
||||
transportOK = true
|
||||
err = responseErr.Err
|
||||
}
|
||||
return nil, foundVersion, fallbackError{
|
||||
err: err,
|
||||
confirmedV2: foundVersion,
|
||||
transportOK: transportOK,
|
||||
}
|
||||
}
|
||||
|
||||
challengeManager := auth.NewSimpleChallengeManager()
|
||||
if err := challengeManager.AddResponse(resp); err != nil {
|
||||
return nil, foundVersion, err
|
||||
}
|
||||
|
||||
if authConfig.RegistryToken != "" {
|
||||
@@ -118,8 +78,24 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end
|
||||
}
|
||||
tr := transport.NewTransport(base, modifiers...)
|
||||
|
||||
repo, err = client.NewRepository(ctx, repoName, endpoint.URL, tr)
|
||||
return repo, foundVersion, err
|
||||
repoNameRef, err := distreference.ParseNamed(repoName)
|
||||
if err != nil {
|
||||
return nil, foundVersion, fallbackError{
|
||||
err: err,
|
||||
confirmedV2: foundVersion,
|
||||
transportOK: true,
|
||||
}
|
||||
}
|
||||
|
||||
repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr)
|
||||
if err != nil {
|
||||
err = fallbackError{
|
||||
err: err,
|
||||
confirmedV2: foundVersion,
|
||||
transportOK: true,
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type existingTokenHandler struct {
|
||||
@@ -134,30 +110,3 @@ func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[s
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
|
||||
return nil
|
||||
}
|
||||
|
||||
// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the
|
||||
// operation after this error.
|
||||
func retryOnError(err error) error {
|
||||
switch v := err.(type) {
|
||||
case errcode.Errors:
|
||||
return retryOnError(v[0])
|
||||
case errcode.Error:
|
||||
switch v.Code {
|
||||
case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied:
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
case *url.Error:
|
||||
return retryOnError(v.Err)
|
||||
case *client.UnexpectedHTTPResponseError:
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
case error:
|
||||
if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
|
||||
return xfer.DoNotRetry{Err: err}
|
||||
}
|
||||
}
|
||||
// let's be nice and fallback if the error is a completely
|
||||
// unexpected one.
|
||||
// If new errors have to be handled in some way, please
|
||||
// add them to the switch above.
|
||||
return err
|
||||
}
|
||||
|
||||
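Note: NewV2Repository above now records whether the failed ping at least spoke HTTP with the registry (transportOK), so callers never retry that host over plaintext. A standalone sketch of that wrapping pattern follows; the pingResponseError type stands in for registry.PingResponseError, and the error text is made up.

package main

import (
    "errors"
    "fmt"
)

// fallbackError mirrors the struct used above: it carries the original error
// plus what the attempt taught us about the endpoint.
type fallbackError struct {
    err         error
    confirmedV2 bool
    transportOK bool
}

func (f fallbackError) Error() string { return f.err.Error() }

// pingResponseError is a stand-in for registry.PingResponseError: the ping
// got an HTTP response, it just was not a usable one.
type pingResponseError struct{ Err error }

func (e pingResponseError) Error() string { return e.Err.Error() }

// wrapPingError reproduces the pattern: if we at least spoke HTTP with the
// registry, record transportOK so the caller never retries it in plaintext.
func wrapPingError(err error) error {
    transportOK := false
    if responseErr, ok := err.(pingResponseError); ok {
        transportOK = true
        err = responseErr.Err
    }
    return fallbackError{err: err, confirmedV2: false, transportOK: transportOK}
}

func main() {
    raw := pingResponseError{Err: errors.New("registry returned 503")}
    err := wrapPingError(raw)
    if fe, ok := err.(fallbackError); ok {
        fmt.Printf("fallback allowed, transportOK=%v: %v\n", fe.transportOK, fe)
    }
}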
6
vendor/github.com/docker/docker/distribution/xfer/download.go
generated
vendored
@@ -59,6 +59,10 @@ type DownloadDescriptor interface {
|
||||
DiffID() (layer.DiffID, error)
|
||||
// Download is called to perform the download.
|
||||
Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
|
||||
// Close is called when the download manager is finished with this
|
||||
// descriptor and will not call Download again or read from the reader
|
||||
// that Download returned.
|
||||
Close()
|
||||
}
|
||||
|
||||
// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
|
||||
@@ -229,6 +233,8 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor,
|
||||
retries int
|
||||
)
|
||||
|
||||
defer descriptor.Close()
|
||||
|
||||
for {
|
||||
downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput)
|
||||
if err == nil {
|
||||
|
||||
16
vendor/github.com/docker/docker/distribution/xfer/transfer.go
generated
vendored
@@ -124,7 +124,6 @@ func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
t.broadcastDone = true
|
||||
}
|
||||
@@ -159,18 +158,23 @@ func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
|
||||
defer func() {
|
||||
close(w.running)
|
||||
}()
|
||||
done := false
|
||||
var (
|
||||
done bool
|
||||
lastWritten progress.Progress
|
||||
hasLastWritten bool
|
||||
)
|
||||
for {
|
||||
t.mu.Lock()
|
||||
hasLastProgress := t.hasLastProgress
|
||||
lastProgress := t.lastProgress
|
||||
t.mu.Unlock()
|
||||
|
||||
// This might write the last progress item a
|
||||
// second time (since channel closure also gets
|
||||
// us here), but that's fine.
|
||||
if hasLastProgress {
|
||||
// Make sure we don't write the last progress item
|
||||
// twice.
|
||||
if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
|
||||
progressOutput.WriteProgress(lastProgress)
|
||||
lastWritten = lastProgress
|
||||
hasLastWritten = true
|
||||
}
|
||||
|
||||
if done {
|
||||
|
||||
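Note: the Watch change above tracks the last progress item actually written so the same update is not emitted twice when the channel closes. A much-simplified standalone sketch of that dedup check, with made-up progress values:

package main

import "fmt"

// progressItem is a comparable stand-in for progress.Progress, so the
// "did we already write this?" check can use plain equality.
type progressItem struct {
    ID      string
    Current int64
}

func main() {
    updates := []progressItem{
        {"layer1", 10},
        {"layer1", 20},
        {"layer1", 20}, // repeated last item, e.g. seen again on channel close
    }

    var (
        lastWritten    progressItem
        hasLastWritten bool
    )
    for _, p := range updates {
        // Skip the write when it would repeat the last progress item.
        if hasLastWritten && p == lastWritten {
            continue
        }
        fmt.Println("write:", p)
        lastWritten = p
        hasLastWritten = true
    }
}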
23
vendor/github.com/docker/docker/distribution/xfer/upload.go
generated
vendored
@@ -5,6 +5,7 @@ import (
"time"

"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/progress"
"golang.org/x/net/context"
@@ -28,8 +29,8 @@ func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager {
type uploadTransfer struct {
Transfer

diffID layer.DiffID
err error
remoteDescriptor distribution.Descriptor
err error
}

// An UploadDescriptor references a layer that may need to be uploaded.
@@ -41,7 +42,12 @@ type UploadDescriptor interface {
// DiffID should return the DiffID for this layer.
DiffID() layer.DiffID
// Upload is called to perform the Upload.
Upload(ctx context.Context, progressOutput progress.Output) error
Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
// SetRemoteDescriptor provides the distribution.Descriptor that was
// returned by Upload. This descriptor is not to be confused with
// the UploadDescriptor interface, which is used for internally
// identifying layers that are being uploaded.
SetRemoteDescriptor(descriptor distribution.Descriptor)
}

// Upload is a blocking function which ensures the listed layers are present on
@@ -50,7 +56,7 @@ type UploadDescriptor interface {
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
var (
uploads []*uploadTransfer
dedupDescriptors = make(map[string]struct{})
dedupDescriptors = make(map[string]*uploadTransfer)
)

for _, descriptor := range layers {
@@ -60,12 +66,12 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
if _, present := dedupDescriptors[key]; present {
continue
}
dedupDescriptors[key] = struct{}{}

xferFunc := lum.makeUploadFunc(descriptor)
upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
defer upload.Release(watcher)
uploads = append(uploads, upload.(*uploadTransfer))
dedupDescriptors[key] = upload.(*uploadTransfer)
}

for _, upload := range uploads {
@@ -78,6 +84,9 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
}
}
}
for _, l := range layers {
l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor)
}

return nil
}
@@ -86,7 +95,6 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun
return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
u := &uploadTransfer{
Transfer: NewTransfer(),
diffID: descriptor.DiffID(),
}

go func() {
@@ -105,8 +113,9 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun

retries := 0
for {
err := descriptor.Upload(u.Transfer.Context(), progressOutput)
remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
if err == nil {
u.remoteDescriptor = remoteDescriptor
break
}
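Note on the upload.go hunk above: by keying the dedup map to the `*uploadTransfer` itself, duplicate layers share one transfer, and once everything finishes each descriptor is handed the shared remote descriptor via `SetRemoteDescriptor`. A minimal, self-contained sketch of that deduplication pattern; the names (`result`, `job`, `uploadAll`) are illustrative, not the vendored API:

```go
package main

import "fmt"

// result stands in for distribution.Descriptor in the vendored code.
type result struct{ digest string }

// job stands in for an UploadDescriptor: it has a dedup key and can run once.
type job struct {
	key string
	run func() result
}

// uploadAll runs each distinct key once and lets duplicates reuse the result.
func uploadAll(jobs []job) map[string]result {
	done := make(map[string]result) // one result per key, shared by duplicates
	for _, j := range jobs {
		if _, present := done[j.key]; present {
			continue // duplicate key: reuse the first upload's result
		}
		done[j.key] = j.run()
	}
	return done
}

func main() {
	jobs := []job{
		{key: "sha256:aaa", run: func() result { return result{digest: "sha256:aaa"} }},
		{key: "sha256:aaa", run: func() result { panic("never runs: deduplicated") }},
	}
	for k, r := range uploadAll(jobs) {
		fmt.Println(k, "->", r.digest)
	}
}
```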
24 changes: vendor/github.com/docker/docker/dockerversion/useragent.go (generated, vendored, new file)
@@ -0,0 +1,24 @@
package dockerversion

import (
"runtime"

"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/useragent"
)

// DockerUserAgent is the User-Agent the Docker client uses to identify itself.
// It is populated from version information of different components.
func DockerUserAgent() string {
httpVersion := make([]useragent.VersionInfo, 0, 6)
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
}
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})

return useragent.AppendVersions("", httpVersion...)
}
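For reference, DockerUserAgent above assembles name/version pairs and hands them to useragent.AppendVersions. A rough sketch of the resulting header format, with a stand-in for AppendVersions since the vendored helper is not reproduced here:

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// appendVersions mimics the shape of useragent.AppendVersions: each component
// is rendered as name/version and the components are joined with spaces.
func appendVersions(base string, versions ...[2]string) string {
	parts := []string{}
	if base != "" {
		parts = append(parts, base)
	}
	for _, v := range versions {
		parts = append(parts, v[0]+"/"+v[1])
	}
	return strings.Join(parts, " ")
}

func main() {
	// Produces something like: docker/1.10.3 go/go1.5.3 os/linux arch/amd64
	fmt.Println(appendVersions("",
		[2]string{"docker", "1.10.3"},
		[2]string{"go", runtime.Version()},
		[2]string{"os", runtime.GOOS},
		[2]string{"arch", runtime.GOARCH},
	))
}
```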
3 changes: vendor/github.com/docker/docker/dockerversion/version_lib.go (generated, vendored)
@@ -9,8 +9,5 @@ const (
GitCommit string = "library-import"
Version string = "library-import"
BuildTime string = "library-import"

IAmStatic string = "library-import"
InitSHA1 string = "library-import"
InitPath string = "library-import"
)
12 changes: vendor/github.com/docker/docker/image/image.go (generated, vendored)
@@ -70,6 +70,16 @@ func (img *Image) ID() ID {
return img.computedID
}

// ImageID stringizes ID.
func (img *Image) ImageID() string {
return string(img.ID())
}

// RunConfig returns the image's container config.
func (img *Image) RunConfig() *container.Config {
return img.Config
}

// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
// that JSON that's been manipulated by a push/pull cycle with a legacy
// registry won't end up with a different key order.
@@ -106,7 +116,7 @@ type History struct {

// Exporter provides interface for exporting and importing images
type Exporter interface {
Load(io.ReadCloser, io.Writer) error
Load(io.ReadCloser, io.Writer, bool) error
// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
Save([]string, io.Writer) error
}
6 changes: vendor/github.com/docker/docker/image/v1/imagev1.go (generated, vendored)
@@ -31,7 +31,7 @@ func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error)
return image.History{
Author: v1Image.Author,
Created: v1Image.Created,
CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd.Slice(), " "),
CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "),
Comment: v1Image.Comment,
EmptyLayer: emptyLayer,
}, nil
@@ -97,7 +97,7 @@ func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []im

delete(c, "id")
delete(c, "parent")
delete(c, "Size") // Size is calculated from data on disk and is inconsitent
delete(c, "Size") // Size is calculated from data on disk and is inconsistent
delete(c, "parent_id")
delete(c, "layer_id")
delete(c, "throwaway")
@@ -142,7 +142,7 @@ func rawJSON(value interface{}) *json.RawMessage {
// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID '%s' is invalid ", id)
return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}
33 changes: vendor/github.com/docker/docker/layer/layer_store.go (generated, vendored)
@@ -577,11 +577,7 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou
}

func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
type diffPathDriver interface {
DiffPath(string) (string, func() error, error)
}

diffDriver, ok := ls.driver.(diffPathDriver)
diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
if !ok {
diffDriver = &naiveDiffPathDriver{ls.driver}
}
@@ -589,17 +585,16 @@ func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size
defer metadata.Close()

// get our relative path to the container
fsPath, releasePath, err := diffDriver.DiffPath(graphID)
fileGetCloser, err := diffDriver.DiffGetter(graphID)
if err != nil {
return err
}
defer releasePath()
defer fileGetCloser.Close()

metaUnpacker := storage.NewJSONUnpacker(metadata)
upackerCounter := &unpackSizeCounter{metaUnpacker, size}
fileGetter := storage.NewPathFileGetter(fsPath)
logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath)
return asm.WriteOutputTarStream(fileGetter, upackerCounter, w)
logrus.Debugf("Assembling tar data for %s", graphID)
return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w)
}

func (ls *layerStore) Cleanup() error {
@@ -618,12 +613,20 @@ type naiveDiffPathDriver struct {
graphdriver.Driver
}

func (n *naiveDiffPathDriver) DiffPath(id string) (string, func() error, error) {
type fileGetPutter struct {
storage.FileGetter
driver graphdriver.Driver
id string
}

func (w *fileGetPutter) Close() error {
return w.driver.Put(w.id)
}

func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
p, err := n.Driver.Get(id, "")
if err != nil {
return "", nil, err
return nil, err
}
return p, func() error {
return n.Driver.Put(id)
}, nil
return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
}
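The layer_store.go hunks swap the path-based DiffPath hook for graphdriver.DiffGetterDriver, wrapping the driver's file getter so that Close releases the mount taken by Get. A generic sketch of that getter-plus-release wrapper; the types here are illustrative, not the graphdriver interfaces:

```go
package main

import "fmt"

// getCloser hands out file contents; Close releases whatever backs it.
type getCloser interface {
	Get(path string) (string, error)
	Close() error
}

type mountedGetter struct {
	files   map[string]string
	release func() error // plays the role of driver.Put(id) in the vendored code
}

func (m *mountedGetter) Get(path string) (string, error) {
	v, ok := m.files[path]
	if !ok {
		return "", fmt.Errorf("no such file: %s", path)
	}
	return v, nil
}

func (m *mountedGetter) Close() error { return m.release() }

func main() {
	g := getCloser(&mountedGetter{
		files:   map[string]string{"etc/hostname": "box"},
		release: func() error { fmt.Println("released mount"); return nil },
	})
	defer g.Close() // mirrors `defer fileGetCloser.Close()` in assembleTarTo
	v, _ := g.Get("etc/hostname")
	fmt.Println(v)
}
```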
74 changes: vendor/github.com/docker/docker/opts/hosts.go (generated, vendored)
@@ -4,16 +4,12 @@ import (
"fmt"
"net"
"net/url"
"runtime"
"strconv"
"strings"
)

var (
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
// is not supplied. A better longer term solution would be to use a named
// pipe as the default on the Windows daemon.
// These are the IANA registered port numbers for use with Docker
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DefaultHTTPPort = 2375 // Default HTTP Port
@@ -26,13 +22,19 @@ var (
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
// DefaultNamedPipe defines the default named pipe used by docker on Windows
DefaultNamedPipe = `//./pipe/docker_engine`
)

// ValidateHost validates that the specified string is a valid host and returns it.
func ValidateHost(val string) (string, error) {
_, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
if err != nil {
return val, err
host := strings.TrimSpace(val)
// The empty string means default and is not handled by parseDockerDaemonHost
if host != "" {
_, err := parseDockerDaemonHost(host)
if err != nil {
return val, err
}
}
// Note: unlike most flag validators, we don't return the mutated value here
// we need to know what the user entered later (using ParseHost) to adjust for tls
@@ -40,39 +42,39 @@ func ValidateHost(val string) (string, error) {
}

// ParseHost and set defaults for a Daemon host string
func ParseHost(defaultHost, val string) (string, error) {
host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
if err != nil {
return val, err
func ParseHost(defaultToTLS bool, val string) (string, error) {
host := strings.TrimSpace(val)
if host == "" {
if defaultToTLS {
host = DefaultTLSHost
} else {
host = DefaultHost
}
} else {
var err error
host, err = parseDockerDaemonHost(host)
if err != nil {
return val, err
}
}
return host, nil
}

// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr
// defaultUnixAddr must be a absolute file path (no `unix://` prefix)
// defaultTCPAddr must be the full `tcp://host:port` form
func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
addr = strings.TrimSpace(addr)
if addr == "" {
if defaultAddr == defaultTLSHost {
return defaultTLSHost, nil
}
if runtime.GOOS != "windows" {
return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
}
return defaultTCPAddr, nil
}
// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go.
func parseDockerDaemonHost(addr string) (string, error) {
addrParts := strings.Split(addr, "://")
if len(addrParts) == 1 {
if len(addrParts) == 1 && addrParts[0] != "" {
addrParts = []string{"tcp", addrParts[0]}
}

switch addrParts[0] {
case "tcp":
return parseTCPAddr(addrParts[1], defaultTCPAddr)
return parseTCPAddr(addrParts[1], DefaultTCPHost)
case "unix":
return parseUnixAddr(addrParts[1], defaultUnixAddr)
return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
case "npipe":
return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
case "fd":
return addr, nil
default:
@@ -80,19 +82,19 @@ func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defa
}
}

// parseUnixAddr parses and validates that the specified address is a valid UNIX
// socket address. It returns a formatted UNIX socket address, either using the
// address parsed from addr, or the contents of defaultAddr if addr is a blank
// string.
func parseUnixAddr(addr string, defaultAddr string) (string, error) {
addr = strings.TrimPrefix(addr, "unix://")
// parseSimpleProtoAddr parses and validates that the specified address is a valid
// socket address for simple protocols like unix and npipe. It returns a formatted
// socket address, either using the address parsed from addr, or the contents of
// defaultAddr if addr is a blank string.
func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
addr = strings.TrimPrefix(addr, proto+"://")
if strings.Contains(addr, "://") {
return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
}
if addr == "" {
addr = defaultAddr
}
return fmt.Sprintf("unix://%s", addr), nil
return fmt.Sprintf("%s://%s", proto, addr), nil
}

// parseTCPAddr parses and validates that the specified address is a valid TCP
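With the new signatures above, ParseHost(defaultToTLS, val) falls back to DefaultTLSHost or DefaultHost for an empty value, and parseDockerDaemonHost normalizes everything else by scheme. A simplified, self-contained illustration of that normalization (not the vendored implementation; the defaults shown assume the Linux values above, and port handling is omitted):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeHost is a simplified stand-in for parseDockerDaemonHost: a bare
// address is treated as tcp, and unix/npipe addresses fall back to a default
// path when the part after the scheme is empty.
func normalizeHost(addr string) string {
	parts := strings.SplitN(addr, "://", 2)
	if len(parts) == 1 && parts[0] != "" {
		parts = []string{"tcp", parts[0]}
	}
	switch parts[0] {
	case "tcp":
		return "tcp://" + parts[1]
	case "unix":
		if parts[1] == "" {
			parts[1] = "/var/run/docker.sock"
		}
		return "unix://" + parts[1]
	case "npipe":
		if parts[1] == "" {
			parts[1] = "//./pipe/docker_engine"
		}
		return "npipe://" + parts[1]
	default:
		return addr
	}
}

func main() {
	for _, in := range []string{"0.0.0.0:2375", "unix://", "npipe://"} {
		fmt.Printf("%-16q -> %s\n", in, normalizeHost(in))
	}
}
```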
2 changes: vendor/github.com/docker/docker/opts/hosts_windows.go (generated, vendored)
@@ -3,4 +3,4 @@
package opts

// DefaultHost constant defines the default host string used by docker on Windows
var DefaultHost = DefaultTCPHost
var DefaultHost = "npipe://" + DefaultNamedPipe
48 changes: vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored)
@@ -502,13 +502,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close tar writer: %s", err)
logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Debugf("Can't close compress writer: %s", err)
logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Debugf("Can't close pipe writer: %s", err)
logrus.Errorf("Can't close pipe writer: %s", err)
}
}()

@@ -551,7 +551,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
walkRoot := getWalkRoot(srcPath, include)
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}

@@ -576,16 +576,42 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
if err != nil {
logrus.Debugf("Error matching %s: %v", relFilePath, err)
logrus.Errorf("Error matching %s: %v", relFilePath, err)
return err
}
}

if skip {
if !exceptions && f.IsDir() {
// If we want to skip this file and its a directory
// then we should first check to see if there's an
// excludes pattern (eg !dir/file) that starts with this
// dir. If so then we can't skip this dir.

// Its not a dir then so we can just return/skip.
if !f.IsDir() {
return nil
}

// No exceptions (!...) in patterns so just skip dir
if !exceptions {
return filepath.SkipDir
}
return nil

dirSlash := relFilePath + string(filepath.Separator)

for _, pat := range patterns {
if pat[0] != '!' {
continue
}
pat = pat[1:] + string(filepath.Separator)
if strings.HasPrefix(pat, dirSlash) {
// found a match - so can't skip this dir
return nil
}
}

// No matching exclusion dir so just skip dir
return filepath.SkipDir
}

if seen[relFilePath] {
@@ -607,7 +633,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}

if err := ta.addTarFile(filePath, relFilePath); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writting tar stream to it
if err == io.ErrClosedPipe {
return err
}
}
return nil
})
@@ -660,7 +690,7 @@ loop:
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0777)
err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
if err != nil {
return err
}
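The skip logic added to TarWithOptions above only prunes a matched directory with filepath.SkipDir when no `!` exception pattern points inside it. A compact sketch of just that check, with illustrative names:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// canSkipDir reports whether a matched directory may be pruned entirely:
// only when no exclusion exception ("!pattern") refers to something inside it.
func canSkipDir(dir string, patterns []string) bool {
	dirSlash := dir + string(filepath.Separator)
	for _, pat := range patterns {
		if !strings.HasPrefix(pat, "!") {
			continue
		}
		if strings.HasPrefix(pat[1:]+string(filepath.Separator), dirSlash) {
			return false // an exception lives under this dir; must descend into it
		}
	}
	return true
}

func main() {
	patterns := []string{"docs", "!docs/README.md"}
	fmt.Println(canSkipDir("docs", patterns)) // false: !docs/README.md must survive
	fmt.Println(canSkipDir("logs", patterns)) // true: nothing under logs is re-included
}
```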
12 changes: vendor/github.com/docker/docker/pkg/fileutils/fileutils.go (generated, vendored)
@@ -52,7 +52,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
if exclusion(pattern) {
pattern = pattern[1:]
}
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
}

return cleanedPatterns, patternDirs, exceptions, nil
@@ -83,8 +83,9 @@ func Matches(file string, patterns []string) (bool, error) {
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, "/")
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))

for i, pattern := range patterns {
negative := false
@@ -102,8 +103,8 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = regexpMatch(strings.Join(patDirs[i], "/"),
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
}
}

@@ -125,6 +126,9 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
//
// As per the comment in golangs filepath.Match, on Windows, escaping
// is disabled. Instead, '\\' is treated as path separator.
func regexpMatch(pattern, path string) (bool, error) {
regStr := "^"
2 changes: vendor/github.com/docker/docker/pkg/idtools/idtools.go (generated, vendored)
@@ -171,7 +171,7 @@ func parseSubidFile(path, username string) (ranges, error) {
}

text := strings.TrimSpace(s.Text())
if text == "" {
if text == "" || strings.HasPrefix(text, "#") {
continue
}
parts := strings.Split(text, ":")
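The idtools.go change makes parseSubidFile ignore `#` comment lines as well as blanks. A minimal sketch of parsing /etc/subuid-style entries under that rule (illustrative, not the vendored parser):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type subIDRange struct{ start, length int }

// parseSubid reads "user:start:count" lines, skipping blanks and '#' comments.
func parseSubid(data, username string) ([]subIDRange, error) {
	var ranges []subIDRange
	for _, line := range strings.Split(data, "\n") {
		text := strings.TrimSpace(line)
		if text == "" || strings.HasPrefix(text, "#") {
			continue
		}
		parts := strings.Split(text, ":")
		if len(parts) != 3 || parts[0] != username {
			continue
		}
		start, err := strconv.Atoi(parts[1])
		if err != nil {
			return nil, err
		}
		length, err := strconv.Atoi(parts[2])
		if err != nil {
			return nil, err
		}
		ranges = append(ranges, subIDRange{start, length})
	}
	return ranges, nil
}

func main() {
	data := "# managed by useradd\n\ndockremap:100000:65536\n"
	fmt.Println(parseSubid(data, "dockremap")) // [{100000 65536}] <nil>
}
```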
82 changes: vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go (generated, vendored)
@@ -1,9 +1,7 @@
package ioutils

import (
"errors"
"io"
"net/http"
"sync"
)

@@ -11,45 +9,43 @@ import (
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the targets lifecycle has already ended.
type WriteFlusher struct {
mu sync.Mutex
w io.Writer
flusher http.Flusher
flushed bool
closed error

// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
// channel will allow one to properly order the operations.
w io.Writer
flusher flusher
flushed chan struct{}
flushedOnce sync.Once
closed chan struct{}
closeLock sync.Mutex
}

var errWriteFlusherClosed = errors.New("writeflusher: closed")
type flusher interface {
Flush()
}

var errWriteFlusherClosed = io.EOF

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
wf.mu.Lock()
defer wf.mu.Unlock()
if wf.closed != nil {
return 0, wf.closed
select {
case <-wf.closed:
return 0, errWriteFlusherClosed
default:
}

n, err = wf.w.Write(b)
wf.flush() // every write is a flush.
wf.Flush() // every write is a flush.
return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
wf.mu.Lock()
defer wf.mu.Unlock()

wf.flush()
}

// flush the stream immediately without taking a lock. Used internally.
func (wf *WriteFlusher) flush() {
if wf.closed != nil {
select {
case <-wf.closed:
return
default:
}

wf.flushed = true
wf.flushedOnce.Do(func() {
close(wf.flushed)
})
wf.flusher.Flush()
}

@@ -59,34 +55,38 @@ func (wf *WriteFlusher) Flushed() bool {
// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
// be used to detect whether or a response code has been issued or not.
// Another hook should be used instead.
wf.mu.Lock()
defer wf.mu.Unlock()

return wf.flushed
var flushed bool
select {
case <-wf.flushed:
flushed = true
default:
}
return flushed
}

// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
wf.mu.Lock()
defer wf.mu.Unlock()
wf.closeLock.Lock()
defer wf.closeLock.Unlock()

if wf.closed != nil {
return wf.closed
select {
case <-wf.closed:
return errWriteFlusherClosed
default:
close(wf.closed)
}

wf.closed = errWriteFlusherClosed
return nil
}

// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
var fl flusher
if f, ok := w.(flusher); ok {
fl = f
} else {
flusher = &NopFlusher{}
fl = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
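The WriteFlusher rewrite above trades the mutex-guarded booleans for closed channels: closing a channel is a one-shot signal any goroutine can observe with a non-blocking select, sync.Once guards the `flushed` close, and a small mutex still serializes Close. A stripped-down sketch of that latch pattern:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosed = errors.New("latch: closed")

// latch mirrors the shape of the new WriteFlusher state: closed channels act
// as one-shot signals, sync.Once guards the "fired" close, and a small mutex
// serializes Close itself.
type latch struct {
	fired     chan struct{}
	firedOnce sync.Once
	closed    chan struct{}
	closeLock sync.Mutex
}

func newLatch() *latch {
	return &latch{fired: make(chan struct{}), closed: make(chan struct{})}
}

// Fire records that the latch fired at least once (cf. WriteFlusher.Flush).
func (l *latch) Fire() error {
	select {
	case <-l.closed:
		return errClosed
	default:
	}
	l.firedOnce.Do(func() { close(l.fired) }) // safe to call any number of times
	return nil
}

// Close makes every later Fire fail; the mutex prevents a double close.
func (l *latch) Close() error {
	l.closeLock.Lock()
	defer l.closeLock.Unlock()
	select {
	case <-l.closed:
		return errClosed
	default:
		close(l.closed)
	}
	return nil
}

// Fired reports whether Fire has ever succeeded, without blocking.
func (l *latch) Fired() bool {
	select {
	case <-l.fired:
		return true
	default:
		return false
	}
}

func main() {
	l := newLatch()
	fmt.Println(l.Fire(), l.Fired()) // <nil> true
	fmt.Println(l.Close(), l.Fire()) // <nil> latch: closed
}
```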
2 changes: vendor/github.com/docker/docker/pkg/mflag/flag.go (generated, vendored)
@@ -1163,7 +1163,7 @@ func (fs *FlagSet) ReportError(str string, withHelp bool) {
str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
}
}
fmt.Fprintf(fs.Out(), "docker: %s.\n", str)
fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str)
}

// Parsed reports whether fs.Parse has been called.
5 changes: vendor/github.com/docker/docker/pkg/system/chtimes.go (generated, vendored)
@@ -43,5 +43,10 @@ func Chtimes(name string, atime time.Time, mtime time.Time) error {
return err
}

// Take platform specific action for setting create time.
if err := setCTime(name, mtime); err != nil {
return err
}

return nil
}
14 changes: vendor/github.com/docker/docker/pkg/system/chtimes_unix.go (generated, vendored, new file)
@@ -0,0 +1,14 @@
// +build !windows

package system

import (
"time"
)

//setCTime will set the create time on a file. On Unix, the create
//time is updated as a side effect of setting the modified time, so
//no action is required.
func setCTime(path string, ctime time.Time) error {
return nil
}
27 changes: vendor/github.com/docker/docker/pkg/system/chtimes_windows.go (generated, vendored, new file)
@@ -0,0 +1,27 @@
// +build windows

package system

import (
"syscall"
"time"
)

//setCTime will set the create time on a file. On Windows, this requires
//calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
pathp, e := syscall.UTF16PtrFromString(path)
if e != nil {
return e
}
h, e := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if e != nil {
return e
}
defer syscall.Close(h)
c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
return syscall.SetFileTime(h, &c, nil, nil)
}
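For context, the standard library's os.Chtimes already sets access and modification times; the Chtimes/setCTime pair above adds the Windows-only step of also setting the creation time. A hedged illustration using only os.Chtimes:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"
)

func main() {
	f, err := ioutil.TempFile("", "chtimes-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	// os.Chtimes covers atime and mtime; the vendored system.Chtimes also calls
	// setCTime so that, on Windows, the creation time is set to mtime as well.
	when := time.Date(2016, 3, 1, 12, 0, 0, 0, time.UTC)
	if err := os.Chtimes(f.Name(), when, when); err != nil {
		panic(err)
	}

	fi, err := os.Stat(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.ModTime().UTC()) // 2016-03-01 12:00:00 +0000 UTC
}
```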
2 changes: vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md (generated, vendored)
@@ -223,7 +223,7 @@ with matching paths, and orders the list of file sums accordingly [3].
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31

## Acknowledgements
## Acknowledgments

Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the
TarSum calculation.
1 change: vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go (generated, vendored)
@@ -27,7 +27,6 @@ func MakeRaw(fd uintptr) (*State, error) {
newState := oldState.termios

C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
newState.Oflag = newState.Oflag | C.OPOST
if err := tcset(fd, &newState); err != 0 {
return nil, err
}
1 change: vendor/github.com/docker/docker/pkg/term/term.go (generated, vendored)
@@ -127,6 +127,5 @@ func handleInterrupt(fd uintptr, state *State) {
go func() {
_ = <-sigchan
RestoreTerminal(fd, state)
os.Exit(0)
}()
}
200 changes: vendor/github.com/docker/docker/pkg/term/term_windows.go (generated, vendored)
@@ -3,21 +3,20 @@
|
||||
package term
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/Azure/go-ansiterm/winterm"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/term/windows"
|
||||
)
|
||||
|
||||
// State holds the console mode for the terminal.
|
||||
type State struct {
|
||||
mode uint32
|
||||
inMode, outMode uint32
|
||||
inHandle, outHandle syscall.Handle
|
||||
}
|
||||
|
||||
// Winsize is used for window size.
|
||||
@@ -28,6 +27,15 @@ type Winsize struct {
|
||||
y uint16
|
||||
}
|
||||
|
||||
const (
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
|
||||
enableVirtualTerminalInput = 0x0200
|
||||
enableVirtualTerminalProcessing = 0x0004
|
||||
)
|
||||
|
||||
// usingNativeConsole is true if we are using the Windows native console
|
||||
var usingNativeConsole bool
|
||||
|
||||
// StdStreams returns the standard streams (stdin, stdout, stedrr).
|
||||
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
|
||||
switch {
|
||||
@@ -39,6 +47,7 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
|
||||
return windows.ConsoleStreams()
|
||||
default:
|
||||
if useNativeConsole() {
|
||||
usingNativeConsole = true
|
||||
return os.Stdin, os.Stdout, os.Stderr
|
||||
}
|
||||
return windows.ConsoleStreams()
|
||||
@@ -54,7 +63,7 @@ func useNativeConsole() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Native console is not available major version 10
|
||||
// Native console is not available before major version 10
|
||||
if osv.MajorVersion < 10 {
|
||||
return false
|
||||
}
|
||||
@@ -64,6 +73,17 @@ func useNativeConsole() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Get the console modes. If this fails, we can't use the native console
|
||||
state, err := getNativeConsole()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Probe the console to see if it can be enabled.
|
||||
if nil != probeNativeConsole(state) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Environment variable override
|
||||
if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" {
|
||||
if e == "1" {
|
||||
@@ -72,32 +92,86 @@ func useNativeConsole() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Get the handle to stdout
|
||||
stdOutHandle, err := syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Get the console mode from the consoles stdout handle
|
||||
var mode uint32
|
||||
if err := syscall.GetConsoleMode(stdOutHandle, &mode); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Legacy mode does not have native ANSI emulation.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
|
||||
const enableVirtualTerminalProcessing = 0x0004
|
||||
if mode&enableVirtualTerminalProcessing == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO Windows (Post TP4). The native emulator still has issues which
|
||||
// TODO Windows. The native emulator still has issues which
|
||||
// mean it shouldn't be enabled for everyone. Change this next line to true
|
||||
// to change the default to "enable if available". In the meantime, users
|
||||
// can still try it out by using USE_NATIVE_CONSOLE env variable.
|
||||
return false
|
||||
}
|
||||
|
||||
// getNativeConsole returns the console modes ('state') for the native Windows console
|
||||
func getNativeConsole() (State, error) {
|
||||
var (
|
||||
err error
|
||||
state State
|
||||
)
|
||||
|
||||
// Get the handle to stdout
|
||||
if state.outHandle, err = syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE); err != nil {
|
||||
return state, err
|
||||
}
|
||||
|
||||
// Get the console mode from the consoles stdout handle
|
||||
if err = syscall.GetConsoleMode(state.outHandle, &state.outMode); err != nil {
|
||||
return state, err
|
||||
}
|
||||
|
||||
// Get the handle to stdin
|
||||
if state.inHandle, err = syscall.GetStdHandle(syscall.STD_INPUT_HANDLE); err != nil {
|
||||
return state, err
|
||||
}
|
||||
|
||||
// Get the console mode from the consoles stdin handle
|
||||
if err = syscall.GetConsoleMode(state.inHandle, &state.inMode); err != nil {
|
||||
return state, err
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// probeNativeConsole probes the console to determine if native can be supported,
|
||||
func probeNativeConsole(state State) error {
|
||||
if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil {
|
||||
return err
|
||||
}
|
||||
defer winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode)
|
||||
|
||||
if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil {
|
||||
return err
|
||||
}
|
||||
defer winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// enableNativeConsole turns on native console mode
|
||||
func enableNativeConsole(state State) error {
|
||||
if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil {
|
||||
winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) // restore out if we can
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// disableNativeConsole turns off native console mode
|
||||
func disableNativeConsole(state *State) error {
|
||||
// Try and restore both in an out before error checking.
|
||||
errout := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode)
|
||||
errin := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode)
|
||||
if errout != nil {
|
||||
return errout
|
||||
}
|
||||
if errin != nil {
|
||||
return errin
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
|
||||
func GetFdInfo(in interface{}) (uintptr, bool) {
|
||||
return windows.GetHandleInfo(in)
|
||||
@@ -105,7 +179,6 @@ func GetFdInfo(in interface{}) (uintptr, bool) {
|
||||
|
||||
// GetWinsize returns the window size based on the specified file descriptor.
|
||||
func GetWinsize(fd uintptr) (*Winsize, error) {
|
||||
|
||||
info, err := winterm.GetConsoleScreenBufferInfo(fd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -117,58 +190,9 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
|
||||
x: 0,
|
||||
y: 0}
|
||||
|
||||
// Note: GetWinsize is called frequently -- uncomment only for excessive details
|
||||
// logrus.Debugf("[windows] GetWinsize: Console(%v)", info.String())
|
||||
// logrus.Debugf("[windows] GetWinsize: Width(%v), Height(%v), x(%v), y(%v)", winsize.Width, winsize.Height, winsize.x, winsize.y)
|
||||
return winsize, nil
|
||||
}
|
||||
|
||||
// SetWinsize tries to set the specified window size for the specified file descriptor.
|
||||
func SetWinsize(fd uintptr, ws *Winsize) error {
|
||||
|
||||
// Ensure the requested dimensions are no larger than the maximum window size
|
||||
info, err := winterm.GetConsoleScreenBufferInfo(fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) {
|
||||
return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allow: (%v,%v)",
|
||||
ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y)
|
||||
}
|
||||
|
||||
// Narrow the sizes to that used by Windows
|
||||
width := winterm.SHORT(ws.Width)
|
||||
height := winterm.SHORT(ws.Height)
|
||||
|
||||
// Set the dimensions while ensuring they remain within the bounds of the backing console buffer
|
||||
// -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs,
|
||||
// shift the upper left just enough to keep the new window within the buffer.
|
||||
rect := info.Window
|
||||
if width < rect.Right-rect.Left+1 {
|
||||
rect.Right = rect.Left + width - 1
|
||||
} else if width > rect.Right-rect.Left+1 {
|
||||
rect.Right = rect.Left + width - 1
|
||||
if rect.Right >= info.Size.X {
|
||||
rect.Left = info.Size.X - width
|
||||
rect.Right = info.Size.X - 1
|
||||
}
|
||||
}
|
||||
|
||||
if height < rect.Bottom-rect.Top+1 {
|
||||
rect.Bottom = rect.Top + height - 1
|
||||
} else if height > rect.Bottom-rect.Top+1 {
|
||||
rect.Bottom = rect.Top + height - 1
|
||||
if rect.Bottom >= info.Size.Y {
|
||||
rect.Top = info.Size.Y - height
|
||||
rect.Bottom = info.Size.Y - 1
|
||||
}
|
||||
}
|
||||
logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect)
|
||||
|
||||
return winterm.SetConsoleWindowInfo(fd, true, rect)
|
||||
}
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
return windows.IsConsole(fd)
|
||||
@@ -177,25 +201,36 @@ func IsTerminal(fd uintptr) bool {
|
||||
// RestoreTerminal restores the terminal connected to the given file descriptor
|
||||
// to a previous state.
|
||||
func RestoreTerminal(fd uintptr, state *State) error {
|
||||
return winterm.SetConsoleMode(fd, state.mode)
|
||||
if usingNativeConsole {
|
||||
return disableNativeConsole(state)
|
||||
}
|
||||
return winterm.SetConsoleMode(fd, state.outMode)
|
||||
}
|
||||
|
||||
// SaveState saves the state of the terminal connected to the given file descriptor.
|
||||
func SaveState(fd uintptr) (*State, error) {
|
||||
if usingNativeConsole {
|
||||
state, err := getNativeConsole()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &state, nil
|
||||
}
|
||||
|
||||
mode, e := winterm.GetConsoleMode(fd)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return &State{mode}, nil
|
||||
|
||||
return &State{outMode: mode}, nil
|
||||
}
|
||||
|
||||
// DisableEcho disables echo for the terminal connected to the given file descriptor.
|
||||
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
|
||||
func DisableEcho(fd uintptr, state *State) error {
|
||||
mode := state.mode
|
||||
mode := state.inMode
|
||||
mode &^= winterm.ENABLE_ECHO_INPUT
|
||||
mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
|
||||
|
||||
err := winterm.SetConsoleMode(fd, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -227,10 +262,17 @@ func MakeRaw(fd uintptr) (*State, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mode := state.inMode
|
||||
if usingNativeConsole {
|
||||
if err := enableNativeConsole(*state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mode |= enableVirtualTerminalInput
|
||||
}
|
||||
|
||||
// See
|
||||
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
|
||||
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
|
||||
mode := state.mode
|
||||
|
||||
// Disable these modes
|
||||
mode &^= winterm.ENABLE_ECHO_INPUT
|
||||
|
||||
338 changes: vendor/github.com/docker/docker/registry/auth.go (generated, vendored)
@@ -1,35 +1,28 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/registry/client/auth"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/engine-api/types"
|
||||
registrytypes "github.com/docker/engine-api/types/registry"
|
||||
)
|
||||
|
||||
// Login tries to register/login to the registry server.
|
||||
func Login(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) {
|
||||
// Separates the v2 registry login logic from the v1 logic.
|
||||
if registryEndpoint.Version == APIVersion2 {
|
||||
return loginV2(authConfig, registryEndpoint, "" /* scope */)
|
||||
}
|
||||
return loginV1(authConfig, registryEndpoint)
|
||||
}
|
||||
|
||||
// loginV1 tries to register/login to the v1 registry server.
|
||||
func loginV1(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) {
|
||||
var (
|
||||
status string
|
||||
respBody []byte
|
||||
err error
|
||||
respStatusCode = 0
|
||||
serverAddress = authConfig.ServerAddress
|
||||
)
|
||||
func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, error) {
|
||||
registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
serverAddress := registryEndpoint.String()
|
||||
|
||||
logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint)
|
||||
|
||||
@@ -39,186 +32,121 @@ func loginV1(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string,
|
||||
|
||||
loginAgainstOfficialIndex := serverAddress == IndexServer
|
||||
|
||||
// to avoid sending the server address to the server it should be removed before being marshaled
|
||||
authCopy := *authConfig
|
||||
authCopy.ServerAddress = ""
|
||||
|
||||
jsonBody, err := json.Marshal(authCopy)
|
||||
req, err := http.NewRequest("GET", serverAddress+"users/", nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Config Error: %s", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
|
||||
b := strings.NewReader(string(jsonBody))
|
||||
resp1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Server Error: %s", err)
|
||||
}
|
||||
defer resp1.Body.Close()
|
||||
respStatusCode = resp1.StatusCode
|
||||
respBody, err = ioutil.ReadAll(resp1.Body)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Server Error: [%#v] %s", respStatusCode, err)
|
||||
}
|
||||
|
||||
if respStatusCode == 201 {
|
||||
if loginAgainstOfficialIndex {
|
||||
status = "Account created. Please use the confirmation link we sent" +
|
||||
" to your e-mail to activate it."
|
||||
} else {
|
||||
// *TODO: Use registry configuration to determine what this says, if anything?
|
||||
status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
|
||||
}
|
||||
} else if respStatusCode == 400 {
|
||||
if string(respBody) == "\"Username or email already exists\"" {
|
||||
req, err := http.NewRequest("GET", serverAddress+"users/", nil)
|
||||
req.SetBasicAuth(authConfig.Username, authConfig.Password)
|
||||
resp, err := registryEndpoint.client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if resp.StatusCode == 200 {
|
||||
return "Login Succeeded", nil
|
||||
} else if resp.StatusCode == 401 {
|
||||
return "", fmt.Errorf("Wrong login/password, please try again")
|
||||
} else if resp.StatusCode == 403 {
|
||||
if loginAgainstOfficialIndex {
|
||||
return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
|
||||
}
|
||||
// *TODO: Use registry configuration to determine what this says, if anything?
|
||||
return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
|
||||
} else if resp.StatusCode == 500 { // Issue #14326
|
||||
logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
|
||||
return "", fmt.Errorf("Internal Server Error")
|
||||
}
|
||||
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
|
||||
}
|
||||
return "", fmt.Errorf("Registration: %s", respBody)
|
||||
|
||||
} else if respStatusCode == 401 {
|
||||
// This case would happen with private registries where /v1/users is
|
||||
// protected, so people can use `docker login` as an auth check.
|
||||
req, err := http.NewRequest("GET", serverAddress+"users/", nil)
|
||||
req.SetBasicAuth(authConfig.Username, authConfig.Password)
|
||||
resp, err := registryEndpoint.client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if resp.StatusCode == 200 {
|
||||
return "Login Succeeded", nil
|
||||
} else if resp.StatusCode == 401 {
|
||||
return "", fmt.Errorf("Wrong login/password, please try again")
|
||||
} else {
|
||||
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
|
||||
resp.StatusCode, resp.Header)
|
||||
}
|
||||
} else {
|
||||
return "", fmt.Errorf("Unexpected status code [%d] : %s", respStatusCode, respBody)
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// loginV2 tries to login to the v2 registry server. The given registry endpoint has been
|
||||
// pinged or setup with a list of authorization challenges. Each of these challenges are
|
||||
// tried until one of them succeeds. Currently supported challenge schemes are:
|
||||
// HTTP Basic Authorization
|
||||
// Token Authorization with a separate token issuing server
|
||||
// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For
|
||||
// now, users should create their account through other means like directly from a web page
|
||||
// served by the v2 registry service provider. Whether this will be supported in the future
|
||||
// is to be determined.
|
||||
func loginV2(authConfig *types.AuthConfig, registryEndpoint *Endpoint, scope string) (string, error) {
|
||||
logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint)
|
||||
var (
|
||||
err error
|
||||
allErrors []error
|
||||
)
|
||||
|
||||
for _, challenge := range registryEndpoint.AuthChallenges {
|
||||
params := make(map[string]string, len(challenge.Parameters)+1)
|
||||
for k, v := range challenge.Parameters {
|
||||
params[k] = v
|
||||
}
|
||||
params["scope"] = scope
|
||||
logrus.Debugf("trying %q auth challenge with params %v", challenge.Scheme, params)
|
||||
|
||||
switch strings.ToLower(challenge.Scheme) {
|
||||
case "basic":
|
||||
err = tryV2BasicAuthLogin(authConfig, params, registryEndpoint)
|
||||
case "bearer":
|
||||
err = tryV2TokenAuthLogin(authConfig, params, registryEndpoint)
|
||||
default:
|
||||
// Unsupported challenge types are explicitly skipped.
|
||||
err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return "Login Succeeded", nil
|
||||
}
|
||||
|
||||
logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err)
|
||||
|
||||
allErrors = append(allErrors, err)
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors)
|
||||
}
|
||||
|
||||
func tryV2BasicAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
|
||||
req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.SetBasicAuth(authConfig.Username, authConfig.Password)
|
||||
|
||||
resp, err := registryEndpoint.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
// fallback when request could not be completed
|
||||
return "", fallbackError{
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
return "Login Succeeded", nil
|
||||
} else if resp.StatusCode == http.StatusUnauthorized {
|
||||
if loginAgainstOfficialIndex {
|
||||
return "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com")
|
||||
}
|
||||
return "", fmt.Errorf("Wrong login/password, please try again")
|
||||
} else if resp.StatusCode == http.StatusForbidden {
|
||||
if loginAgainstOfficialIndex {
|
||||
return "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.")
|
||||
}
|
||||
// *TODO: Use registry configuration to determine what this says, if anything?
|
||||
return "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
|
||||
} else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326
|
||||
logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
|
||||
return "", fmt.Errorf("Internal Server Error")
|
||||
} else {
|
||||
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
|
||||
resp.StatusCode, resp.Header)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func tryV2TokenAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
|
||||
token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint)
|
||||
type loginCredentialStore struct {
|
||||
authConfig *types.AuthConfig
|
||||
}
|
||||
|
||||
func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
|
||||
return lcs.authConfig.Username, lcs.authConfig.Password
|
||||
}
|
||||
|
||||
type fallbackError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (err fallbackError) Error() string {
|
||||
return err.err.Error()
|
||||
}
|
||||
|
||||
// loginV2 tries to login to the v2 registry server. The given registry
|
||||
// endpoint will be pinged to get authorization challenges. These challenges
|
||||
// will be used to authenticate against the registry to validate credentials.
|
||||
func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, error) {
|
||||
logrus.Debugf("attempting v2 login to registry endpoint %s", endpoint)
|
||||
|
||||
modifiers := DockerHeaders(userAgent, nil)
|
||||
authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
|
||||
|
||||
challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport)
|
||||
if err != nil {
|
||||
return err
|
||||
if !foundV2 {
|
||||
err = fallbackError{err: err}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
creds := loginCredentialStore{
|
||||
authConfig: authConfig,
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
||||
tokenHandler := auth.NewTokenHandler(authTransport, creds, "")
|
||||
basicHandler := auth.NewBasicHandler(creds)
|
||||
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
|
||||
tr := transport.NewTransport(authTransport, modifiers...)
|
||||
|
||||
resp, err := registryEndpoint.client.Do(req)
|
||||
loginClient := &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
|
||||
req, err := http.NewRequest("GET", endpointStr, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
if !foundV2 {
|
||||
err = fallbackError{err: err}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
resp, err := loginClient.Do(req)
|
||||
if err != nil {
|
||||
if !foundV2 {
|
||||
err = fallbackError{err: err}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
|
||||
err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
if !foundV2 {
|
||||
err = fallbackError{err: err}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
return nil
|
||||
return "Login Succeeded", nil
|
||||
|
||||
}
|
||||
|
||||
// ResolveAuthConfig matches an auth configuration to a server address or a URL
|
||||
@@ -253,3 +181,63 @@ func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registryt
|
||||
// When all else fails, return an empty auth config
|
||||
return types.AuthConfig{}
|
||||
}
|
||||
|
||||
// PingResponseError is used when the response from a ping
|
||||
// was received but invalid.
|
||||
type PingResponseError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (err PingResponseError) Error() string {
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
// PingV2Registry attempts to ping a v2 registry and on success return a
|
||||
// challenge manager for the supported authentication types and
|
||||
// whether v2 was confirmed by the response. If a response is received but
|
||||
// cannot be interpreted a PingResponseError will be returned.
|
||||
func PingV2Registry(endpoint APIEndpoint, transport http.RoundTripper) (auth.ChallengeManager, bool, error) {
|
||||
var (
|
||||
foundV2 = false
|
||||
v2Version = auth.APIVersion{
|
||||
Type: "registry",
|
||||
Version: "2.0",
|
||||
}
|
||||
)
|
||||
|
||||
pingClient := &http.Client{
|
||||
Transport: transport,
|
||||
Timeout: 15 * time.Second,
|
||||
}
|
||||
endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
|
||||
req, err := http.NewRequest("GET", endpointStr, nil)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
resp, err := pingClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
versions := auth.APIVersions(resp, DefaultRegistryVersionHeader)
|
||||
for _, pingVersion := range versions {
|
||||
if pingVersion == v2Version {
|
||||
// The version header indicates we're definitely
|
||||
// talking to a v2 registry. So don't allow future
|
||||
// fallbacks to the v1 protocol.
|
||||
|
||||
foundV2 = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
challengeManager := auth.NewSimpleChallengeManager()
|
||||
if err := challengeManager.AddResponse(resp); err != nil {
|
||||
return nil, foundV2, PingResponseError{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return challengeManager, foundV2, nil
|
||||
}
|
||||
|
||||
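The rewritten login path above pings the registry's /v2/ endpoint, feeds the response into a challenge manager, and only then retries with token or basic auth. A minimal, dependency-free sketch of just the ping step; the endpoint URL is a placeholder and the real code goes through PingV2Registry and docker/distribution's auth package:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	endpoint := "https://registry-1.docker.io" // placeholder registry endpoint
	client := &http.Client{Timeout: 15 * time.Second}

	resp, err := client.Get(strings.TrimRight(endpoint, "/") + "/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A v2 registry advertises itself via this header (see PingV2Registry above).
	fmt.Println("API version:", resp.Header.Get("Docker-Distribution-Api-Version"))
	// A 401 response carries the auth challenges the login code feeds into the
	// challenge manager, e.g. Bearer realm="...",service="...".
	fmt.Println("Status:", resp.StatusCode)
	fmt.Println("Challenge:", resp.Header.Get("WWW-Authenticate"))
}
```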
150 changes: vendor/github.com/docker/docker/registry/authchallenge.go (generated, vendored)
@@ -1,150 +0,0 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
type octetType byte
|
||||
|
||||
// AuthorizationChallenge carries information
|
||||
// from a WWW-Authenticate response header.
|
||||
type AuthorizationChallenge struct {
|
||||
Scheme string
|
||||
Parameters map[string]string
|
||||
}
|
||||
|
||||
var octetTypes [256]octetType
|
||||
|
||||
const (
|
||||
isToken octetType = 1 << iota
|
||||
isSpace
|
||||
)
|
||||
|
||||
func init() {
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
||||
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isToken
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
func parseAuthHeader(header http.Header) []*AuthorizationChallenge {
|
||||
var challenges []*AuthorizationChallenge
|
||||
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
|
||||
v, p := parseValueAndParams(h)
|
||||
if v != "" {
|
||||
challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p})
|
||||
}
|
||||
}
|
||||
return challenges
|
||||
}
|
||||
|
||||
func parseValueAndParams(header string) (value string, params map[string]string) {
|
||||
params = make(map[string]string)
|
||||
value, s := expectToken(header)
|
||||
if value == "" {
|
||||
return
|
||||
}
|
||||
value = strings.ToLower(value)
|
||||
s = "," + skipSpace(s)
|
||||
for strings.HasPrefix(s, ",") {
|
||||
var pkey string
|
||||
pkey, s = expectToken(skipSpace(s[1:]))
|
||||
if pkey == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return
|
||||
}
|
||||
var pvalue string
|
||||
pvalue, s = expectTokenOrQuoted(s[1:])
|
||||
if pvalue == "" {
|
||||
return
|
||||
}
|
||||
pkey = strings.ToLower(pkey)
|
||||
params[pkey] = pvalue
|
||||
s = skipSpace(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isToken == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return expectToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
			for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
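Since authchallenge.go is deleted here, a rough standard-library-only stand-in (assumption: it covers only the common case where quoted values contain no commas; the deleted code also handled quoting and escapes) can illustrate what parseValueAndParams produced for a typical Bearer challenge. The realm and service values below are placeholders.

```go
package main

import (
	"fmt"
	"strings"
)

// splitChallenge splits a WWW-Authenticate value into a lower-cased scheme
// and its key=value parameters, for the simple unquoted-comma case only.
func splitChallenge(h string) (scheme string, params map[string]string) {
	params = map[string]string{}
	parts := strings.SplitN(h, " ", 2)
	scheme = strings.ToLower(parts[0])
	if len(parts) < 2 {
		return
	}
	for _, kv := range strings.Split(parts[1], ",") {
		pair := strings.SplitN(strings.TrimSpace(kv), "=", 2)
		if len(pair) == 2 {
			params[strings.ToLower(pair[0])] = strings.Trim(pair[1], `"`)
		}
	}
	return
}

func main() {
	scheme, p := splitChallenge(`Bearer realm="https://auth.example.com/token",service="registry.example.com"`)
	fmt.Println(scheme, p["realm"], p["service"])
}
```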
7 vendor/github.com/docker/docker/registry/config.go generated vendored
@@ -19,7 +19,7 @@ type Options struct {
|
||||
InsecureRegistries opts.ListOpts
|
||||
}
|
||||
|
||||
const (
|
||||
var (
|
||||
// DefaultNamespace is the default namespace
|
||||
DefaultNamespace = "docker.io"
|
||||
// DefaultRegistryVersionHeader is the name of the default HTTP header
|
||||
@@ -27,7 +27,7 @@ const (
|
||||
DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version"
|
||||
|
||||
// IndexServer is the v1 registry server used for user auth + account creation
|
||||
IndexServer = DefaultV1Registry + "/v1/"
|
||||
IndexServer = DefaultV1Registry.String() + "/v1/"
|
||||
// IndexName is the name of the index
|
||||
IndexName = "docker.io"
|
||||
|
||||
@@ -49,6 +49,9 @@ var (
|
||||
V2Only = false
|
||||
)
|
||||
|
||||
// for mocking in unit tests
|
||||
var lookupIP = net.LookupIP
|
||||
|
||||
// InstallFlags adds command-line options to the top-level flag parser for
|
||||
// the current process.
|
||||
func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) {
|
||||
|
||||
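The switch from string constants to *url.URL values means index addresses such as IndexServer are now composed via String() rather than string concatenation; a tiny standard-library sanity check of that composition:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Mirrors the new unix DefaultV1Registry value shown below.
	v1 := &url.URL{Scheme: "https", Host: "index.docker.io"}
	fmt.Println(v1.String() + "/v1/") // https://index.docker.io/v1/
}
```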
16 vendor/github.com/docker/docker/registry/config_unix.go generated vendored
@@ -2,12 +2,22 @@
|
||||
|
||||
package registry
|
||||
|
||||
const (
|
||||
import (
|
||||
"net/url"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultV1Registry is the URI of the default v1 registry
|
||||
DefaultV1Registry = "https://index.docker.io"
|
||||
DefaultV1Registry = &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "index.docker.io",
|
||||
}
|
||||
|
||||
// DefaultV2Registry is the URI of the default v2 registry
|
||||
DefaultV2Registry = "https://registry-1.docker.io"
|
||||
DefaultV2Registry = &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "registry-1.docker.io",
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
13 vendor/github.com/docker/docker/registry/config_windows.go generated vendored
@@ -1,21 +1,28 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
var (
|
||||
// DefaultV1Registry is the URI of the default v1 registry
|
||||
DefaultV1Registry = "https://registry-win-tp3.docker.io"
|
||||
DefaultV1Registry = &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "registry-win-tp3.docker.io",
|
||||
}
|
||||
|
||||
// DefaultV2Registry is the URI of the default (official) v2 registry.
|
||||
// This is the windows-specific endpoint.
|
||||
//
|
||||
// Currently it is a TEMPORARY link that allows Microsoft to continue
|
||||
// development of Docker Engine for Windows.
|
||||
DefaultV2Registry = "https://registry-win-tp3.docker.io"
|
||||
DefaultV2Registry = &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "registry-win-tp3.docker.io",
|
||||
}
|
||||
)
|
||||
|
||||
// CertsDir is the directory where certificates are stored
|
||||
|
||||
277 vendor/github.com/docker/docker/registry/endpoint.go generated vendored
@@ -1,277 +0,0 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
registrytypes "github.com/docker/engine-api/types/registry"
|
||||
)
|
||||
|
||||
// for mocking in unit tests
|
||||
var lookupIP = net.LookupIP
|
||||
|
||||
// scanForAPIVersion scans the address for an API version suffix in the URL path and returns the trimmed address and the API version, if one was found.
|
||||
func scanForAPIVersion(address string) (string, APIVersion) {
|
||||
var (
|
||||
chunks []string
|
||||
apiVersionStr string
|
||||
)
|
||||
|
||||
if strings.HasSuffix(address, "/") {
|
||||
address = address[:len(address)-1]
|
||||
}
|
||||
|
||||
chunks = strings.Split(address, "/")
|
||||
apiVersionStr = chunks[len(chunks)-1]
|
||||
|
||||
for k, v := range apiVersions {
|
||||
if apiVersionStr == v {
|
||||
address = strings.Join(chunks[:len(chunks)-1], "/")
|
||||
return address, k
|
||||
}
|
||||
}
|
||||
|
||||
return address, APIVersionUnknown
|
||||
}
|
||||
|
||||
// NewEndpoint parses the given address to return a registry endpoint. v can be used to
|
||||
// specify a specific endpoint version
|
||||
func NewEndpoint(index *registrytypes.IndexInfo, metaHeaders http.Header, v APIVersion) (*Endpoint, error) {
|
||||
tlsConfig, err := newTLSConfig(index.Name, index.Secure)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
endpoint, err := newEndpoint(GetAuthConfigKey(index), tlsConfig, metaHeaders)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if v != APIVersionUnknown {
|
||||
endpoint.Version = v
|
||||
}
|
||||
if err := validateEndpoint(endpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
func validateEndpoint(endpoint *Endpoint) error {
|
||||
logrus.Debugf("pinging registry endpoint %s", endpoint)
|
||||
|
||||
// Try HTTPS ping to registry
|
||||
endpoint.URL.Scheme = "https"
|
||||
if _, err := endpoint.Ping(); err != nil {
|
||||
if endpoint.IsSecure {
|
||||
// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
|
||||
// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP.
|
||||
return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
|
||||
}
|
||||
|
||||
// If registry is insecure and HTTPS failed, fallback to HTTP.
|
||||
logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
|
||||
endpoint.URL.Scheme = "http"
|
||||
|
||||
var err2 error
|
||||
if _, err2 = endpoint.Ping(); err2 == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newEndpoint(address string, tlsConfig *tls.Config, metaHeaders http.Header) (*Endpoint, error) {
|
||||
var (
|
||||
endpoint = new(Endpoint)
|
||||
trimmedAddress string
|
||||
err error
|
||||
)
|
||||
|
||||
if !strings.HasPrefix(address, "http") {
|
||||
address = "https://" + address
|
||||
}
|
||||
|
||||
endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify)
|
||||
|
||||
trimmedAddress, endpoint.Version = scanForAPIVersion(address)
|
||||
|
||||
if endpoint.URL, err = url.Parse(trimmedAddress); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(tiborvass): make sure a ConnectTimeout transport is used
|
||||
tr := NewTransport(tlsConfig)
|
||||
endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...))
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// Endpoint stores basic information about a registry endpoint.
|
||||
type Endpoint struct {
|
||||
client *http.Client
|
||||
URL *url.URL
|
||||
Version APIVersion
|
||||
IsSecure bool
|
||||
AuthChallenges []*AuthorizationChallenge
|
||||
URLBuilder *v2.URLBuilder
|
||||
}
|
||||
|
||||
// Get the formatted URL for the root of this registry Endpoint
|
||||
func (e *Endpoint) String() string {
|
||||
return fmt.Sprintf("%s/v%d/", e.URL, e.Version)
|
||||
}
|
||||
|
||||
// VersionString returns a formatted string of this
|
||||
// endpoint address using the given API Version.
|
||||
func (e *Endpoint) VersionString(version APIVersion) string {
|
||||
return fmt.Sprintf("%s/v%d/", e.URL, version)
|
||||
}
|
||||
|
||||
// Path returns a formatted string for the URL
|
||||
// of this endpoint with the given path appended.
|
||||
func (e *Endpoint) Path(path string) string {
|
||||
return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path)
|
||||
}
|
||||
|
||||
// Ping pings the remote endpoint with v2 and v1 pings to determine the API
|
||||
// version. It returns a PingResult containing the discovered version. The
|
||||
// PingResult also indicates whether the registry is standalone or not.
|
||||
func (e *Endpoint) Ping() (PingResult, error) {
|
||||
// The ping logic to use is determined by the registry endpoint version.
|
||||
switch e.Version {
|
||||
case APIVersion1:
|
||||
return e.pingV1()
|
||||
case APIVersion2:
|
||||
return e.pingV2()
|
||||
}
|
||||
|
||||
// APIVersionUnknown
|
||||
// We should try v2 first...
|
||||
e.Version = APIVersion2
|
||||
regInfo, errV2 := e.pingV2()
|
||||
if errV2 == nil {
|
||||
return regInfo, nil
|
||||
}
|
||||
|
||||
// ... then fallback to v1.
|
||||
e.Version = APIVersion1
|
||||
regInfo, errV1 := e.pingV1()
|
||||
if errV1 == nil {
|
||||
return regInfo, nil
|
||||
}
|
||||
|
||||
e.Version = APIVersionUnknown
|
||||
return PingResult{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1)
|
||||
}
|
||||
|
||||
func (e *Endpoint) pingV1() (PingResult, error) {
|
||||
logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
|
||||
|
||||
if e.String() == IndexServer {
|
||||
// Skip the check, we know this one is valid
|
||||
// (and we never want to fallback to http in case of error)
|
||||
return PingResult{Standalone: false}, nil
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", e.Path("_ping"), nil)
|
||||
if err != nil {
|
||||
return PingResult{Standalone: false}, err
|
||||
}
|
||||
|
||||
resp, err := e.client.Do(req)
|
||||
if err != nil {
|
||||
return PingResult{Standalone: false}, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
jsonString, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
|
||||
}
|
||||
|
||||
// If the header is absent, we assume true for compatibility with earlier
|
||||
// versions of the registry. default to true
|
||||
info := PingResult{
|
||||
Standalone: true,
|
||||
}
|
||||
if err := json.Unmarshal(jsonString, &info); err != nil {
|
||||
logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err)
|
||||
// don't stop here. Just assume sane defaults
|
||||
}
|
||||
if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
|
||||
logrus.Debugf("Registry version header: '%s'", hdr)
|
||||
info.Version = hdr
|
||||
}
|
||||
logrus.Debugf("PingResult.Version: %q", info.Version)
|
||||
|
||||
standalone := resp.Header.Get("X-Docker-Registry-Standalone")
|
||||
logrus.Debugf("Registry standalone header: '%s'", standalone)
|
||||
// Accepted values are "true" (case-insensitive) and "1".
|
||||
if strings.EqualFold(standalone, "true") || standalone == "1" {
|
||||
info.Standalone = true
|
||||
} else if len(standalone) > 0 {
|
||||
// there is a header set, and it is not "true" or "1", so assume fails
|
||||
info.Standalone = false
|
||||
}
|
||||
logrus.Debugf("PingResult.Standalone: %t", info.Standalone)
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (e *Endpoint) pingV2() (PingResult, error) {
|
||||
logrus.Debugf("attempting v2 ping for registry endpoint %s", e)
|
||||
|
||||
req, err := http.NewRequest("GET", e.Path(""), nil)
|
||||
if err != nil {
|
||||
return PingResult{}, err
|
||||
}
|
||||
|
||||
resp, err := e.client.Do(req)
|
||||
if err != nil {
|
||||
return PingResult{}, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// The endpoint may have multiple supported versions.
|
||||
// Ensure it supports the v2 Registry API.
|
||||
var supportsV2 bool
|
||||
|
||||
HeaderLoop:
|
||||
for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] {
|
||||
for _, versionName := range strings.Fields(supportedVersions) {
|
||||
if versionName == "registry/2.0" {
|
||||
supportsV2 = true
|
||||
break HeaderLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !supportsV2 {
|
||||
return PingResult{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e)
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
// It would seem that no authentication/authorization is required.
|
||||
// So we don't need to parse/add any authorization schemes.
|
||||
return PingResult{Standalone: true}, nil
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
// Parse the WWW-Authenticate Header and store the challenges
|
||||
// on this endpoint object.
|
||||
e.AuthChallenges = parseAuthHeader(resp.Header)
|
||||
return PingResult{}, nil
|
||||
}
|
||||
|
||||
return PingResult{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
}
|
||||
199 vendor/github.com/docker/docker/registry/endpoint_v1.go generated vendored Normal file
@@ -0,0 +1,199 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
registrytypes "github.com/docker/engine-api/types/registry"
|
||||
)
|
||||
|
||||
// V1Endpoint stores basic information about a V1 registry endpoint.
|
||||
type V1Endpoint struct {
|
||||
client *http.Client
|
||||
URL *url.URL
|
||||
IsSecure bool
|
||||
}
|
||||
|
||||
// NewV1Endpoint parses the given address to return a registry endpoint. v can be used to
|
||||
// specify a specific endpoint version
|
||||
func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
|
||||
tlsConfig, err := newTLSConfig(index.Name, index.Secure)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := validateEndpoint(endpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
func validateEndpoint(endpoint *V1Endpoint) error {
|
||||
logrus.Debugf("pinging registry endpoint %s", endpoint)
|
||||
|
||||
// Try HTTPS ping to registry
|
||||
endpoint.URL.Scheme = "https"
|
||||
if _, err := endpoint.Ping(); err != nil {
|
||||
if endpoint.IsSecure {
|
||||
// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
|
||||
// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP.
|
||||
return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
|
||||
}
|
||||
|
||||
// If registry is insecure and HTTPS failed, fallback to HTTP.
|
||||
logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
|
||||
endpoint.URL.Scheme = "http"
|
||||
|
||||
var err2 error
|
||||
if _, err2 = endpoint.Ping(); err2 == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
|
||||
endpoint := &V1Endpoint{
|
||||
IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify),
|
||||
URL: new(url.URL),
|
||||
}
|
||||
|
||||
*endpoint.URL = address
|
||||
|
||||
// TODO(tiborvass): make sure a ConnectTimeout transport is used
|
||||
tr := NewTransport(tlsConfig)
|
||||
endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...))
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// trimV1Address trims the version off the address and returns the
|
||||
// trimmed address or an error if there is a non-V1 version.
|
||||
func trimV1Address(address string) (string, error) {
|
||||
var (
|
||||
chunks []string
|
||||
apiVersionStr string
|
||||
)
|
||||
|
||||
if strings.HasSuffix(address, "/") {
|
||||
address = address[:len(address)-1]
|
||||
}
|
||||
|
||||
chunks = strings.Split(address, "/")
|
||||
apiVersionStr = chunks[len(chunks)-1]
|
||||
if apiVersionStr == "v1" {
|
||||
return strings.Join(chunks[:len(chunks)-1], "/"), nil
|
||||
}
|
||||
|
||||
for k, v := range apiVersions {
|
||||
if k != APIVersion1 && apiVersionStr == v {
|
||||
return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr)
|
||||
}
|
||||
}
|
||||
|
||||
return address, nil
|
||||
}
|
||||
|
||||
func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
|
||||
if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") {
|
||||
address = "https://" + address
|
||||
}
|
||||
|
||||
address, err := trimV1Address(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uri, err := url.Parse(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// Get the formatted URL for the root of this registry Endpoint
|
||||
func (e *V1Endpoint) String() string {
|
||||
return e.URL.String() + "/v1/"
|
||||
}
|
||||
|
||||
// Path returns a formatted string for the URL
|
||||
// of this endpoint with the given path appended.
|
||||
func (e *V1Endpoint) Path(path string) string {
|
||||
return e.URL.String() + "/v1/" + path
|
||||
}
|
||||
|
||||
// Ping returns a PingResult which indicates whether the registry is standalone or not.
|
||||
func (e *V1Endpoint) Ping() (PingResult, error) {
|
||||
logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
|
||||
|
||||
if e.String() == IndexServer {
|
||||
// Skip the check, we know this one is valid
|
||||
// (and we never want to fallback to http in case of error)
|
||||
return PingResult{Standalone: false}, nil
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", e.Path("_ping"), nil)
|
||||
if err != nil {
|
||||
return PingResult{Standalone: false}, err
|
||||
}
|
||||
|
||||
resp, err := e.client.Do(req)
|
||||
if err != nil {
|
||||
return PingResult{Standalone: false}, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
jsonString, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
|
||||
}
|
||||
|
||||
// If the header is absent, we assume true for compatibility with earlier
|
||||
	// versions of the registry, and default to true.
|
||||
info := PingResult{
|
||||
Standalone: true,
|
||||
}
|
||||
if err := json.Unmarshal(jsonString, &info); err != nil {
|
||||
logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err)
|
||||
// don't stop here. Just assume sane defaults
|
||||
}
|
||||
if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
|
||||
logrus.Debugf("Registry version header: '%s'", hdr)
|
||||
info.Version = hdr
|
||||
}
|
||||
logrus.Debugf("PingResult.Version: %q", info.Version)
|
||||
|
||||
standalone := resp.Header.Get("X-Docker-Registry-Standalone")
|
||||
logrus.Debugf("Registry standalone header: '%s'", standalone)
|
||||
// Accepted values are "true" (case-insensitive) and "1".
|
||||
if strings.EqualFold(standalone, "true") || standalone == "1" {
|
||||
info.Standalone = true
|
||||
} else if len(standalone) > 0 {
|
||||
// there is a header set, and it is not "true" or "1", so assume fails
|
||||
info.Standalone = false
|
||||
}
|
||||
logrus.Debugf("PingResult.Standalone: %t", info.Standalone)
|
||||
return info, nil
|
||||
}
|
||||
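A brief sketch (not from the diff) of how the new V1Endpoint is intended to be used: build it from an IndexInfo and ping it. The index name and user-agent string below are made up for illustration, and the import paths assume the vendored packages shown in this compare.

```go
package example

import (
	"github.com/docker/docker/registry"
	registrytypes "github.com/docker/engine-api/types/registry"
)

// pingV1Example builds a V1 endpoint for a hypothetical private index and
// pings it. PingResult.Standalone reports whether the registry serves its
// own auth; PingResult.Version echoes X-Docker-Registry-Version if present.
func pingV1Example() (registry.PingResult, error) {
	index := &registrytypes.IndexInfo{Name: "registry.example.com", Secure: true}
	ep, err := registry.NewV1Endpoint(index, "skopeo-dev", nil)
	if err != nil {
		return registry.PingResult{}, err
	}
	return ep.Ping()
}
```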
81 vendor/github.com/docker/docker/registry/registry.go generated vendored
@@ -13,17 +13,10 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/pkg/parsers/kernel"
|
||||
"github.com/docker/docker/pkg/useragent"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
)
|
||||
|
||||
@@ -33,23 +26,7 @@ var (
|
||||
ErrAlreadyExists = errors.New("Image already exists")
|
||||
)
|
||||
|
||||
// dockerUserAgent is the User-Agent the Docker client uses to identify itself.
|
||||
// It is populated on init(), comprising version information of different components.
|
||||
var dockerUserAgent string
|
||||
|
||||
func init() {
|
||||
httpVersion := make([]useragent.VersionInfo, 0, 6)
|
||||
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: dockerversion.Version})
|
||||
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
|
||||
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: dockerversion.GitCommit})
|
||||
if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
|
||||
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
|
||||
}
|
||||
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
|
||||
httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
|
||||
|
||||
dockerUserAgent = useragent.AppendVersions("", httpVersion...)
|
||||
|
||||
if runtime.GOOS != "linux" {
|
||||
V2Only = true
|
||||
}
|
||||
@@ -129,12 +106,13 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DockerHeaders returns request modifiers that ensure requests have
|
||||
// the User-Agent header set to dockerUserAgent and that metaHeaders
|
||||
// are added.
|
||||
func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {
|
||||
modifiers := []transport.RequestModifier{
|
||||
transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}),
|
||||
// DockerHeaders returns request modifiers with a User-Agent and metaHeaders
|
||||
func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier {
|
||||
modifiers := []transport.RequestModifier{}
|
||||
if userAgent != "" {
|
||||
modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{
|
||||
"User-Agent": []string{userAgent},
|
||||
}))
|
||||
}
|
||||
if metaHeaders != nil {
|
||||
modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
|
||||
@@ -187,51 +165,6 @@ func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Reque
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShouldV2Fallback returns true if this error is a reason to fall back to v1.
|
||||
func ShouldV2Fallback(err errcode.Error) bool {
|
||||
switch err.Code {
|
||||
case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ErrNoSupport is an error type used for errors indicating that an operation
|
||||
// is not supported. It encapsulates a more specific error.
|
||||
type ErrNoSupport struct{ Err error }
|
||||
|
||||
func (e ErrNoSupport) Error() string {
|
||||
if e.Err == nil {
|
||||
return "not supported"
|
||||
}
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// ContinueOnError returns true if we should fallback to the next endpoint
|
||||
// as a result of this error.
|
||||
func ContinueOnError(err error) bool {
|
||||
switch v := err.(type) {
|
||||
case errcode.Errors:
|
||||
if len(v) == 0 {
|
||||
return true
|
||||
}
|
||||
return ContinueOnError(v[0])
|
||||
case ErrNoSupport:
|
||||
return ContinueOnError(v.Err)
|
||||
case errcode.Error:
|
||||
return ShouldV2Fallback(v)
|
||||
case *client.UnexpectedHTTPResponseError:
|
||||
return true
|
||||
case error:
|
||||
return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error()))
|
||||
}
|
||||
// let's be nice and fallback if the error is a completely
|
||||
// unexpected one.
|
||||
// If new errors have to be handled in some way, please
|
||||
// add them to the switch above.
|
||||
return true
|
||||
}
|
||||
|
||||
// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the
|
||||
// default TLS configuration.
|
||||
func NewTransport(tlsConfig *tls.Config) *http.Transport {
|
||||
|
||||
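Since DockerHeaders now takes the user agent explicitly instead of a package-level dockerUserAgent, a caller composes the HTTP client roughly like this sketch (the user-agent value is illustrative; NewTransport(nil) relies on the documented default TLS configuration):

```go
package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/registry"
)

// newRegistryClient wires the base transport, the User-Agent request
// modifier, and the registry HTTP client together.
func newRegistryClient() *http.Client {
	base := registry.NewTransport(nil) // nil => default TLS configuration
	modifiers := registry.DockerHeaders("skopeo-dev", nil)
	return registry.HTTPClient(transport.NewTransport(base, modifiers...))
}
```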
65 vendor/github.com/docker/docker/registry/service.go generated vendored
@@ -6,6 +6,7 @@ import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/docker/engine-api/types"
|
||||
registrytypes "github.com/docker/engine-api/types/registry"
|
||||
@@ -28,29 +29,31 @@ func NewService(options *Options) *Service {
|
||||
// Auth contacts the public registry with the provided credentials,
|
||||
// and returns OK if authentication was successful.
|
||||
// It can be used to verify the validity of a client's credentials.
|
||||
func (s *Service) Auth(authConfig *types.AuthConfig) (string, error) {
|
||||
addr := authConfig.ServerAddress
|
||||
if addr == "" {
|
||||
// Use the official registry address if not specified.
|
||||
addr = IndexServer
|
||||
}
|
||||
index, err := s.ResolveIndex(addr)
|
||||
func (s *Service) Auth(authConfig *types.AuthConfig, userAgent string) (status string, err error) {
|
||||
endpoints, err := s.LookupPushEndpoints(authConfig.ServerAddress)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
endpointVersion := APIVersion(APIVersionUnknown)
|
||||
if V2Only {
|
||||
// Override the endpoint to only attempt a v2 ping
|
||||
endpointVersion = APIVersion2
|
||||
}
|
||||
for _, endpoint := range endpoints {
|
||||
login := loginV2
|
||||
if endpoint.Version == APIVersion1 {
|
||||
login = loginV1
|
||||
}
|
||||
|
||||
endpoint, err := NewEndpoint(index, nil, endpointVersion)
|
||||
if err != nil {
|
||||
status, err = login(authConfig, endpoint, userAgent)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if fErr, ok := err.(fallbackError); ok {
|
||||
err = fErr.err
|
||||
logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err)
|
||||
continue
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
authConfig.ServerAddress = endpoint.String()
|
||||
return Login(authConfig, endpoint)
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
// splitReposSearchTerm breaks a search term into an index name and remote name
|
||||
@@ -72,7 +75,7 @@ func splitReposSearchTerm(reposName string) (string, string) {
|
||||
|
||||
// Search queries the public registry for images matching the specified
|
||||
// search terms, and returns the results.
|
||||
func (s *Service) Search(term string, authConfig *types.AuthConfig, headers map[string][]string) (*registrytypes.SearchResults, error) {
|
||||
func (s *Service) Search(term string, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
|
||||
if err := validateNoSchema(term); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -85,7 +88,7 @@ func (s *Service) Search(term string, authConfig *types.AuthConfig, headers map[
|
||||
}
|
||||
|
||||
// *TODO: Search multiple indexes.
|
||||
endpoint, err := NewEndpoint(index, http.Header(headers), APIVersionUnknown)
|
||||
endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -121,7 +124,7 @@ func (s *Service) ResolveIndex(name string) (*registrytypes.IndexInfo, error) {
|
||||
// APIEndpoint represents a remote API endpoint
|
||||
type APIEndpoint struct {
|
||||
Mirror bool
|
||||
URL string
|
||||
URL *url.URL
|
||||
Version APIVersion
|
||||
Official bool
|
||||
TrimHostname bool
|
||||
@@ -129,8 +132,8 @@ type APIEndpoint struct {
|
||||
}
|
||||
|
||||
// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
|
||||
func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) {
|
||||
return newEndpoint(e.URL, e.TLSConfig, metaHeaders)
|
||||
func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
|
||||
return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders)
|
||||
}
|
||||
|
||||
// TLSConfig constructs a client TLS configuration based on server defaults
|
||||
@@ -138,26 +141,22 @@ func (s *Service) TLSConfig(hostname string) (*tls.Config, error) {
|
||||
return newTLSConfig(hostname, isSecureIndex(s.Config, hostname))
|
||||
}
|
||||
|
||||
func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) {
|
||||
mirrorURL, err := url.Parse(mirror)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (s *Service) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) {
|
||||
return s.TLSConfig(mirrorURL.Host)
|
||||
}
|
||||
|
||||
// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference.
|
||||
// It gives preference to v2 endpoints over v1, mirrors over the actual
|
||||
// registry, and HTTPS over plain HTTP.
|
||||
func (s *Service) LookupPullEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
|
||||
return s.lookupEndpoints(repoName)
|
||||
func (s *Service) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||
return s.lookupEndpoints(hostname)
|
||||
}
|
||||
|
||||
// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference.
|
||||
// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP.
|
||||
// Mirrors are not included.
|
||||
func (s *Service) LookupPushEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
|
||||
allEndpoints, err := s.lookupEndpoints(repoName)
|
||||
func (s *Service) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||
allEndpoints, err := s.lookupEndpoints(hostname)
|
||||
if err == nil {
|
||||
for _, endpoint := range allEndpoints {
|
||||
if !endpoint.Mirror {
|
||||
@@ -168,8 +167,8 @@ func (s *Service) LookupPushEndpoints(repoName reference.Named) (endpoints []API
|
||||
return endpoints, err
|
||||
}
|
||||
|
||||
func (s *Service) lookupEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
|
||||
endpoints, err = s.lookupV2Endpoints(repoName)
|
||||
func (s *Service) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||
endpoints, err = s.lookupV2Endpoints(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -178,7 +177,7 @@ func (s *Service) lookupEndpoints(repoName reference.Named) (endpoints []APIEndp
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
legacyEndpoints, err := s.lookupV1Endpoints(repoName)
|
||||
legacyEndpoints, err := s.lookupV1Endpoints(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
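The reworked Auth takes the user agent explicitly and internally walks the push endpoints, preferring v2 login and falling back to v1 on fallbackError. A minimal caller-side sketch, with Service construction elided and placeholder credentials:

```go
package example

import (
	"github.com/docker/docker/registry"
	"github.com/docker/engine-api/types"
)

// login authenticates against the server address using the new Auth
// signature; svc is assumed to be a *registry.Service built elsewhere.
func login(svc *registry.Service) (string, error) {
	return svc.Auth(&types.AuthConfig{
		Username:      "someuser",
		Password:      "somepass",
		ServerAddress: "registry.example.com",
	}, "skopeo-dev")
}
```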
25 vendor/github.com/docker/docker/registry/service_v1.go generated vendored
@@ -1,18 +1,15 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
)
|
||||
|
||||
func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
|
||||
func (s *Service) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||
var cfg = tlsconfig.ServerDefault
|
||||
tlsConfig := &cfg
|
||||
nameString := repoName.FullName()
|
||||
if strings.HasPrefix(nameString, DefaultNamespace+"/") {
|
||||
if hostname == DefaultNamespace {
|
||||
endpoints = append(endpoints, APIEndpoint{
|
||||
URL: DefaultV1Registry,
|
||||
Version: APIVersion1,
|
||||
@@ -23,12 +20,6 @@ func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEn
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
slashIndex := strings.IndexRune(nameString, '/')
|
||||
if slashIndex <= 0 {
|
||||
return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString)
|
||||
}
|
||||
hostname := nameString[:slashIndex]
|
||||
|
||||
tlsConfig, err = s.TLSConfig(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -36,7 +27,10 @@ func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEn
|
||||
|
||||
endpoints = []APIEndpoint{
|
||||
{
|
||||
URL: "https://" + hostname,
|
||||
URL: &url.URL{
|
||||
Scheme: "https",
|
||||
Host: hostname,
|
||||
},
|
||||
Version: APIVersion1,
|
||||
TrimHostname: true,
|
||||
TLSConfig: tlsConfig,
|
||||
@@ -45,7 +39,10 @@ func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEn
|
||||
|
||||
if tlsConfig.InsecureSkipVerify {
|
||||
endpoints = append(endpoints, APIEndpoint{ // or this
|
||||
URL: "http://" + hostname,
|
||||
URL: &url.URL{
|
||||
Scheme: "http",
|
||||
Host: hostname,
|
||||
},
|
||||
Version: APIVersion1,
|
||||
TrimHostname: true,
|
||||
// used to check if supposed to be secure via InsecureSkipVerify
|
||||
|
||||
35 vendor/github.com/docker/docker/registry/service_v2.go generated vendored
@@ -1,26 +1,31 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/reference"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
)
|
||||
|
||||
func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
|
||||
func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
|
||||
var cfg = tlsconfig.ServerDefault
|
||||
tlsConfig := &cfg
|
||||
nameString := repoName.FullName()
|
||||
if strings.HasPrefix(nameString, DefaultNamespace+"/") {
|
||||
if hostname == DefaultNamespace {
|
||||
// v2 mirrors
|
||||
for _, mirror := range s.Config.Mirrors {
|
||||
mirrorTLSConfig, err := s.tlsConfigForMirror(mirror)
|
||||
if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") {
|
||||
mirror = "https://" + mirror
|
||||
}
|
||||
mirrorURL, err := url.Parse(mirror)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
endpoints = append(endpoints, APIEndpoint{
|
||||
URL: mirror,
|
||||
URL: mirrorURL,
|
||||
// guess mirrors are v2
|
||||
Version: APIVersion2,
|
||||
Mirror: true,
|
||||
@@ -40,12 +45,6 @@ func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEn
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
slashIndex := strings.IndexRune(nameString, '/')
|
||||
if slashIndex <= 0 {
|
||||
return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString)
|
||||
}
|
||||
hostname := nameString[:slashIndex]
|
||||
|
||||
tlsConfig, err = s.TLSConfig(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -53,7 +52,10 @@ func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEn
|
||||
|
||||
endpoints = []APIEndpoint{
|
||||
{
|
||||
URL: "https://" + hostname,
|
||||
URL: &url.URL{
|
||||
Scheme: "https",
|
||||
Host: hostname,
|
||||
},
|
||||
Version: APIVersion2,
|
||||
TrimHostname: true,
|
||||
TLSConfig: tlsConfig,
|
||||
@@ -62,7 +64,10 @@ func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEn
|
||||
|
||||
if tlsConfig.InsecureSkipVerify {
|
||||
endpoints = append(endpoints, APIEndpoint{
|
||||
URL: "http://" + hostname,
|
||||
URL: &url.URL{
|
||||
Scheme: "http",
|
||||
Host: hostname,
|
||||
},
|
||||
Version: APIVersion2,
|
||||
TrimHostname: true,
|
||||
// used to check if supposed to be secure via InsecureSkipVerify
|
||||
|
||||
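The mirror handling above now normalizes scheme-less mirror strings to HTTPS before parsing them into *url.URL. A standalone illustration of that normalization (the mirror host is made up):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	mirror := "mirror.example.com:5000"
	// Scheme-less mirrors are assumed to be HTTPS, as in lookupV2Endpoints.
	if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") {
		mirror = "https://" + mirror
	}
	u, err := url.Parse(mirror)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Host) // https mirror.example.com:5000
}
```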
17 vendor/github.com/docker/docker/registry/session.go generated vendored
@@ -37,7 +37,7 @@ var (
|
||||
|
||||
// A Session is used to communicate with a V1 registry
|
||||
type Session struct {
|
||||
indexEndpoint *Endpoint
|
||||
indexEndpoint *V1Endpoint
|
||||
client *http.Client
|
||||
// TODO(tiborvass): remove authConfig
|
||||
authConfig *types.AuthConfig
|
||||
@@ -163,7 +163,7 @@ func (tr *authTransport) CancelRequest(req *http.Request) {
|
||||
|
||||
// NewSession creates a new session
|
||||
// TODO(tiborvass): remove authConfig param once registry client v2 is vendored
|
||||
func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *Endpoint) (r *Session, err error) {
|
||||
func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (r *Session, err error) {
|
||||
r = &Session{
|
||||
authConfig: authConfig,
|
||||
client: client,
|
||||
@@ -175,7 +175,7 @@ func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *End
|
||||
|
||||
// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
|
||||
// alongside all our requests.
|
||||
if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" {
|
||||
if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" {
|
||||
info, err := endpoint.Ping()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -405,7 +405,7 @@ func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
|
||||
|
||||
// GetRepositoryData returns lists of images and endpoints for the repository
|
||||
func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) {
|
||||
repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), name.RemoteName())
|
||||
repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName())
|
||||
|
||||
logrus.Debugf("[registry] Calling GET %s", repositoryTarget)
|
||||
|
||||
@@ -444,7 +444,7 @@ func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, erro
|
||||
|
||||
var endpoints []string
|
||||
if res.Header.Get("X-Docker-Endpoints") != "" {
|
||||
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
|
||||
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -634,7 +634,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData,
|
||||
if validate {
|
||||
suffix = "images"
|
||||
}
|
||||
u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote.RemoteName(), suffix)
|
||||
u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix)
|
||||
logrus.Debugf("[registry] PUT %s", u)
|
||||
logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
|
||||
headers := map[string][]string{
|
||||
@@ -680,7 +680,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData,
|
||||
if res.Header.Get("X-Docker-Endpoints") == "" {
|
||||
return nil, fmt.Errorf("Index response didn't contain any endpoints")
|
||||
}
|
||||
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
|
||||
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -722,7 +722,7 @@ func shouldRedirect(response *http.Response) bool {
|
||||
// SearchRepositories performs a search against the remote repository
|
||||
func (r *Session) SearchRepositories(term string) (*registrytypes.SearchResults, error) {
|
||||
logrus.Debugf("Index server: %s", r.indexEndpoint)
|
||||
u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term)
|
||||
u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term)
|
||||
|
||||
req, err := http.NewRequest("GET", u, nil)
|
||||
if err != nil {
|
||||
@@ -752,7 +752,6 @@ func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig {
|
||||
return &types.AuthConfig{
|
||||
Username: r.authConfig.Username,
|
||||
Password: password,
|
||||
Email: r.authConfig.Email,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
81 vendor/github.com/docker/docker/registry/token.go generated vendored
@@ -1,81 +0,0 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type tokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (string, error) {
|
||||
realm, ok := params["realm"]
|
||||
if !ok {
|
||||
return "", errors.New("no realm specified for token auth challenge")
|
||||
}
|
||||
|
||||
realmURL, err := url.Parse(realm)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid token auth challenge realm: %s", err)
|
||||
}
|
||||
|
||||
if realmURL.Scheme == "" {
|
||||
if registryEndpoint.IsSecure {
|
||||
realmURL.Scheme = "https"
|
||||
} else {
|
||||
realmURL.Scheme = "http"
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", realmURL.String(), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
reqParams := req.URL.Query()
|
||||
service := params["service"]
|
||||
scope := params["scope"]
|
||||
|
||||
if service != "" {
|
||||
reqParams.Add("service", service)
|
||||
}
|
||||
|
||||
for _, scopeField := range strings.Fields(scope) {
|
||||
reqParams.Add("scope", scopeField)
|
||||
}
|
||||
|
||||
if username != "" {
|
||||
reqParams.Add("account", username)
|
||||
req.SetBasicAuth(username, password)
|
||||
}
|
||||
|
||||
req.URL.RawQuery = reqParams.Encode()
|
||||
|
||||
resp, err := registryEndpoint.client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("token auth attempt for registry %s: %s request failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
|
||||
tr := new(tokenResponse)
|
||||
if err = decoder.Decode(tr); err != nil {
|
||||
return "", fmt.Errorf("unable to decode token response: %s", err)
|
||||
}
|
||||
|
||||
if tr.Token == "" {
|
||||
return "", errors.New("authorization server did not include a token in the response")
|
||||
}
|
||||
|
||||
return tr.Token, nil
|
||||
}
|
||||
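For reference, the deleted getToken built a GET against the challenge realm with service, scope, and optional account parameters plus basic auth. An illustrative request of the same shape, with placeholder realm, service, scope, and credentials not taken from the diff:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Add("service", "registry.example.com")
	params.Add("scope", "repository:myorg/myimage:pull")
	params.Add("account", "someuser")

	req, err := http.NewRequest("GET", "https://auth.example.com/token?"+params.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("someuser", "somepass") // only when a username is supplied
	fmt.Println(req.URL.String())
}
```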
14 vendor/github.com/docker/docker/registry/types.go generated vendored
@@ -46,18 +46,18 @@ func (av APIVersion) String() string {
|
||||
return apiVersions[av]
|
||||
}
|
||||
|
||||
var apiVersions = map[APIVersion]string{
|
||||
1: "v1",
|
||||
2: "v2",
|
||||
}
|
||||
|
||||
// API Version identifiers.
|
||||
const (
|
||||
APIVersionUnknown = iota
|
||||
APIVersion1
|
||||
_ = iota
|
||||
APIVersion1 APIVersion = iota
|
||||
APIVersion2
|
||||
)
|
||||
|
||||
var apiVersions = map[APIVersion]string{
|
||||
APIVersion1: "v1",
|
||||
APIVersion2: "v2",
|
||||
}
|
||||
|
||||
// RepositoryInfo describes a repository
|
||||
type RepositoryInfo struct {
|
||||
reference.Named
|
||||
|
||||
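The constant rework keeps the numeric values 1 and 2 but makes APIVersion1 and APIVersion2 typed, so String() resolves them directly; a quick check, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/registry"
)

func main() {
	// fmt resolves the APIVersion Stringer, printing "v1 v2".
	fmt.Println(registry.APIVersion1, registry.APIVersion2)
}
```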
13 vendor/github.com/docker/engine-api/types/auth.go generated vendored
@@ -2,10 +2,15 @@ package types
|
||||
|
||||
// AuthConfig contains authorization information for connecting to a Registry
|
||||
type AuthConfig struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Auth string `json:"auth"`
|
||||
Email string `json:"email"`
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Auth string `json:"auth,omitempty"`
|
||||
|
||||
// Email is an optional value associated with the username.
|
||||
// This field is deprecated and will be removed in a later
|
||||
// version of docker.
|
||||
Email string `json:"email,omitempty"`
|
||||
|
||||
ServerAddress string `json:"serveraddress,omitempty"`
|
||||
RegistryToken string `json:"registrytoken,omitempty"`
|
||||
}
|
||||
|
||||
2 vendor/github.com/docker/engine-api/types/client.go generated vendored
@@ -127,7 +127,7 @@ type ImageBuildOptions struct {
|
||||
Remove bool
|
||||
ForceRemove bool
|
||||
PullParent bool
|
||||
IsolationLevel container.IsolationLevel
|
||||
Isolation container.Isolation
|
||||
CPUSetCPUs string
|
||||
CPUSetMems string
|
||||
CPUShares int64
|
||||
|
||||
4 vendor/github.com/docker/engine-api/types/container/config.go generated vendored
@@ -24,12 +24,12 @@ type Config struct {
|
||||
OpenStdin bool // Open stdin
|
||||
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
|
||||
Env []string // List of environment variable to set in the container
|
||||
Cmd *strslice.StrSlice // Command to run when starting the container
|
||||
Cmd strslice.StrSlice // Command to run when starting the container
|
||||
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
|
||||
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
|
||||
Volumes map[string]struct{} // List of volumes (mounts) used for the container
|
||||
WorkingDir string // Current directory (PWD) in the command will be launched
|
||||
Entrypoint *strslice.StrSlice // Entrypoint to run when starting the container
|
||||
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
|
||||
NetworkDisabled bool `json:",omitempty"` // Is network disabled
|
||||
MacAddress string `json:",omitempty"` // Mac Address of the container
|
||||
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
|
||||
|
||||
91 vendor/github.com/docker/engine-api/types/container/host_config.go generated vendored
@@ -12,13 +12,13 @@ import (
|
||||
// NetworkMode represents the container network stack.
|
||||
type NetworkMode string
|
||||
|
||||
// IsolationLevel represents the isolation level of a container. The supported
|
||||
// Isolation represents the isolation technology of a container. The supported
|
||||
// values are platform specific
|
||||
type IsolationLevel string
|
||||
type Isolation string
|
||||
|
||||
// IsDefault indicates the default isolation level of a container. On Linux this
|
||||
// IsDefault indicates the default isolation technology of a container. On Linux this
|
||||
// is the native driver. On Windows, this is a Windows Server Container.
|
||||
func (i IsolationLevel) IsDefault() bool {
|
||||
func (i Isolation) IsDefault() bool {
|
||||
return strings.ToLower(string(i)) == "default" || string(i) == ""
|
||||
}
|
||||
|
||||
@@ -65,6 +65,30 @@ func (n IpcMode) Container() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// UsernsMode represents userns mode in the container.
|
||||
type UsernsMode string
|
||||
|
||||
// IsHost indicates whether the container uses the host's userns.
|
||||
func (n UsernsMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsPrivate indicates whether the container uses a private userns.
|
||||
func (n UsernsMode) IsPrivate() bool {
|
||||
return !(n.IsHost())
|
||||
}
|
||||
|
||||
// Valid indicates whether the userns is valid.
|
||||
func (n UsernsMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// UTSMode represents the UTS namespace of the container.
|
||||
type UTSMode string
|
||||
|
||||
@@ -151,6 +175,11 @@ func (rp *RestartPolicy) IsUnlessStopped() bool {
|
||||
return rp.Name == "unless-stopped"
|
||||
}
|
||||
|
||||
// IsSame compares two RestartPolicy to see if they are the same
|
||||
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
|
||||
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
|
||||
}
|
||||
|
||||
// LogConfig represents the logging configuration of the container.
|
||||
type LogConfig struct {
|
||||
Type string
|
||||
@@ -161,6 +190,7 @@ type LogConfig struct {
|
||||
type Resources struct {
|
||||
// Applicable to all platforms
|
||||
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
|
||||
Memory int64 // Memory limit (in bytes)
|
||||
|
||||
// Applicable to UNIX platforms
|
||||
CgroupParent string // Parent cgroup.
|
||||
@@ -175,14 +205,19 @@ type Resources struct {
|
||||
CpusetCpus string // CpusetCpus 0-2, 0,1
|
||||
CpusetMems string // CpusetMems 0-2, 0,1
|
||||
Devices []DeviceMapping // List of devices to map inside the container
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
Memory int64 // Memory limit (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
OomKillDisable *bool // Whether to disable OOM Killer or not
PidsLimit int64 // Setting pids limit for a container
Ulimits []*units.Ulimit // List of ulimits to be set in the container

// Applicable to Windows
BlkioIOps uint64 // Maximum IOps for the container system drive
BlkioBps uint64 // Maximum Bytes per second for the container system drive
SandboxSize uint64 // System drive will be expanded to at least this size (in bytes)
}

// UpdateConfig holds the mutable attributes of a Container.
@@ -190,6 +225,7 @@ type Resources struct {
type UpdateConfig struct {
// Contains container's resources (cgroups, ulimits)
Resources
RestartPolicy RestartPolicy
}

// HostConfig the non-portable Config structure of a container.
@@ -207,28 +243,31 @@ type HostConfig struct {
VolumesFrom []string // List of volumes to take from other container

// Applicable to UNIX platforms
CapAdd *strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop *strslice.StrSlice // List of kernel capabilities to remove from the container
DNS []string `json:"Dns"` // List of DNS server to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container
Links []string // List of links (in the name:alias form)
OomScoreAdj int // Container preference for OOM-killing
PidMode PidMode // PID namespace to use for the container
Privileged bool // Is the container in privileged mode
PublishAllPorts bool // Should docker publish all exposed port for the container
ReadonlyRootfs bool // Is the container root filesystem in read-only
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
UTSMode UTSMode // UTS namespace to use for the container
ShmSize int64 // Total shm memory usage
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
DNS []string `json:"Dns"` // List of DNS server to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container
Links []string // List of links (in the name:alias form)
OomScoreAdj int // Container preference for OOM-killing
PidMode PidMode // PID namespace to use for the container
Privileged bool // Is the container in privileged mode
PublishAllPorts bool // Should docker publish all exposed port for the container
ReadonlyRootfs bool // Is the container root filesystem in read-only
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
StorageOpt []string // Storage driver options per container.
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
UTSMode UTSMode // UTS namespace to use for the container
UsernsMode UsernsMode // The user namespace to use for the container
ShmSize int64 // Total shm memory usage
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container

// Applicable to Windows
ConsoleSize [2]int // Initial console size
Isolation IsolationLevel // Isolation level of the container (eg default, hyperv)
ConsoleSize [2]int // Initial console size
Isolation Isolation // Isolation technology of the container (eg default, hyperv)

// Contains container's resources (cgroups, ulimits)
Resources

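The hunk above mostly adds or renames resource and host-configuration fields. As a rough illustration only (not part of this diff), a caller fills these limits through the Resources struct embedded in HostConfig. The field names come from the listing above; the import path assumes the engine-api package at roughly the revision vendored here is available.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types/container"
)

func main() {
	// HostConfig embeds Resources, so cgroup limits are plain field assignments.
	hc := container.HostConfig{
		Resources: container.Resources{
			Memory:     512 * 1024 * 1024, // 512 MiB hard limit
			MemorySwap: -1,                // unlimited swap, per the new comment in this diff
			PidsLimit:  100,               // cap the number of processes in the container
		},
	}
	out, _ := json.Marshal(hc)
	fmt.Println(string(out))
}
```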
10 vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go generated vendored
@@ -4,8 +4,8 @@ package container
|
||||
|
||||
import "strings"
|
||||
|
||||
// IsValid indicates is an isolation level is valid
|
||||
func (i IsolationLevel) IsValid() bool {
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault()
|
||||
}
|
||||
|
||||
@@ -72,12 +72,6 @@ func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
|
||||
func IsPreDefinedNetwork(network string) bool {
|
||||
n := NetworkMode(network)
|
||||
return n.IsBridge() || n.IsHost() || n.IsNone()
|
||||
}
|
||||
|
||||
//UserDefined indicates user-created network
|
||||
func (n NetworkMode) UserDefined() string {
|
||||
if n.IsUserDefined() {
|
||||
|
||||
87 vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go generated vendored
@@ -15,65 +15,74 @@ func (n NetworkMode) IsNone() bool {
|
||||
return n == "none"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether container uses a container network stack.
|
||||
// Returns false as windows doesn't support this mode
|
||||
func (n NetworkMode) IsContainer() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
// in windows it is given the name NAT
|
||||
func (n NetworkMode) IsBridge() bool {
|
||||
return n == "nat"
|
||||
}
|
||||
|
||||
// IsHost indicates whether container uses the host network stack.
|
||||
// returns false as this is not supported by windows
|
||||
func (n NetworkMode) IsHost() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPrivate indicates whether container uses it's private network stack.
|
||||
func (n NetworkMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
}
|
||||
|
||||
// ConnectedContainer is the id of the container which network this container is connected to.
|
||||
// Returns blank string on windows
|
||||
func (n NetworkMode) ConnectedContainer() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsUserDefined indicates user-created network
|
||||
func (n NetworkMode) IsUserDefined() bool {
|
||||
return !n.IsDefault() && !n.IsNone()
|
||||
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
|
||||
}
|
||||
|
||||
// IsHyperV indicates the use of a Hyper-V partition for isolation
|
||||
func (i IsolationLevel) IsHyperV() bool {
|
||||
func (i Isolation) IsHyperV() bool {
|
||||
return strings.ToLower(string(i)) == "hyperv"
|
||||
}
|
||||
|
||||
// IsProcess indicates the use of process isolation
|
||||
func (i IsolationLevel) IsProcess() bool {
|
||||
func (i Isolation) IsProcess() bool {
|
||||
return strings.ToLower(string(i)) == "process"
|
||||
}
|
||||
|
||||
// IsValid indicates is an isolation level is valid
|
||||
func (i IsolationLevel) IsValid() bool {
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool {
|
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
||||
}
|
||||
|
||||
// DefaultDaemonNetworkMode returns the default network stack the daemon should
|
||||
// use.
|
||||
func DefaultDaemonNetworkMode() NetworkMode {
|
||||
return NetworkMode("default")
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string {
|
||||
if n.IsDefault() {
|
||||
return "default"
|
||||
} else if n.IsBridge() {
|
||||
return "nat"
|
||||
} else if n.IsNone() {
|
||||
return "none"
|
||||
} else if n.IsUserDefined() {
|
||||
return n.UserDefined()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
|
||||
func IsPreDefinedNetwork(network string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidateNetMode ensures that the various combinations of requested
|
||||
// network settings are valid.
|
||||
func ValidateNetMode(c *Config, hc *HostConfig) error {
|
||||
// We may not be passed a host config, such as in the case of docker commit
|
||||
if hc == nil {
|
||||
return nil
|
||||
}
|
||||
parts := strings.Split(string(hc.NetworkMode), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "default", "none":
|
||||
default:
|
||||
return fmt.Errorf("invalid --net: %s", hc.NetworkMode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateIsolationLevel performs platform specific validation of the
|
||||
// isolation level in the hostconfig structure. Windows supports 'default' (or
|
||||
// ValidateIsolation performs platform specific validation of the
|
||||
// isolation technology in the hostconfig structure. Windows supports 'default' (or
|
||||
// blank), 'process', or 'hyperv'.
|
||||
func ValidateIsolationLevel(hc *HostConfig) error {
|
||||
func ValidateIsolation(hc *HostConfig) error {
|
||||
// We may not be passed a host config, such as in the case of docker commit
|
||||
if hc == nil {
|
||||
return nil
|
||||
@@ -83,3 +92,11 @@ func ValidateIsolationLevel(hc *HostConfig) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//UserDefined indicates user-created network
|
||||
func (n NetworkMode) UserDefined() string {
|
||||
if n.IsUserDefined() {
|
||||
return string(n)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
57 vendor/github.com/docker/engine-api/types/strslice/strslice.go generated vendored
@@ -1,29 +1,18 @@
|
||||
package strslice
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
import "encoding/json"
|
||||
|
||||
// StrSlice represents a string or an array of strings.
|
||||
// We need to override the json decoder to accept both options.
|
||||
type StrSlice struct {
|
||||
parts []string
|
||||
}
|
||||
type StrSlice []string
|
||||
|
||||
// MarshalJSON Marshals (or serializes) the StrSlice into the json format.
|
||||
// This method is needed to implement json.Marshaller.
|
||||
func (e *StrSlice) MarshalJSON() ([]byte, error) {
|
||||
if e == nil {
|
||||
return []byte{}, nil
|
||||
}
|
||||
return json.Marshal(e.Slice())
|
||||
}
|
||||
|
||||
// UnmarshalJSON decodes the byte slice whether it's a string or an array of strings.
|
||||
// This method is needed to implement json.Unmarshaler.
|
||||
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
|
||||
// strings. This method is needed to implement json.Unmarshaler.
|
||||
func (e *StrSlice) UnmarshalJSON(b []byte) error {
|
||||
if len(b) == 0 {
|
||||
// With no input, we preserve the existing value by returning nil and
|
||||
// leaving the target alone. This allows defining default values for
|
||||
// the type.
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -36,36 +25,6 @@ func (e *StrSlice) UnmarshalJSON(b []byte) error {
|
||||
p = append(p, s)
|
||||
}
|
||||
|
||||
e.parts = p
|
||||
*e = p
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len returns the number of parts of the StrSlice.
|
||||
func (e *StrSlice) Len() int {
|
||||
if e == nil {
|
||||
return 0
|
||||
}
|
||||
return len(e.parts)
|
||||
}
|
||||
|
||||
// Slice gets the parts of the StrSlice as a Slice of string.
|
||||
func (e *StrSlice) Slice() []string {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
return e.parts
|
||||
}
|
||||
|
||||
// ToString gets space separated string of all the parts.
|
||||
func (e *StrSlice) ToString() string {
|
||||
s := e.Slice()
|
||||
if s == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(s, " ")
|
||||
}
|
||||
|
||||
// New creates an StrSlice based on the specified parts (as strings).
|
||||
func New(parts ...string) *StrSlice {
|
||||
return &StrSlice{parts}
|
||||
}
|
||||
|
||||
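The strslice change above turns StrSlice from a struct wrapping a []string into a plain []string with a custom JSON decoder that accepts either a single string or an array. A minimal stand-alone sketch of that string-or-array decoding idea, using a hypothetical StrOrSlice type rather than the vendored one:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// StrOrSlice accepts either "echo hi" or ["echo", "hi"] in JSON.
type StrOrSlice []string

func (s *StrOrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return nil // no input: keep any existing default value
	}
	var parts []string
	if err := json.Unmarshal(b, &parts); err != nil {
		// Not a JSON array: fall back to decoding a single string.
		var single string
		if err := json.Unmarshal(b, &single); err != nil {
			return err
		}
		parts = append(parts, single)
	}
	*s = parts
	return nil
}

func main() {
	var a, b StrOrSlice
	_ = json.Unmarshal([]byte(`"echo hi"`), &a)
	_ = json.Unmarshal([]byte(`["echo","hi"]`), &b)
	fmt.Println(a, b) // [echo hi] [echo hi]
}
```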
13 vendor/github.com/docker/engine-api/types/types.go generated vendored
@@ -142,11 +142,13 @@ type Container struct {
|
||||
SizeRw int64 `json:",omitempty"`
|
||||
SizeRootFs int64 `json:",omitempty"`
|
||||
Labels map[string]string
|
||||
State string
|
||||
Status string
|
||||
HostConfig struct {
|
||||
NetworkMode string `json:",omitempty"`
|
||||
}
|
||||
NetworkSettings *SummaryNetworkSettings
|
||||
Mounts []MountPoint
|
||||
}
|
||||
|
||||
// CopyConfig contains request body of Remote API:
|
||||
@@ -202,6 +204,7 @@ type Info struct {
|
||||
Plugins PluginsInfo
|
||||
MemoryLimit bool
|
||||
SwapLimit bool
|
||||
KernelMemory bool
|
||||
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
|
||||
CPUCfsQuota bool `json:"CpuCfsQuota"`
|
||||
CPUShares bool
|
||||
@@ -216,6 +219,7 @@ type Info struct {
|
||||
SystemTime string
|
||||
ExecutionDriver string
|
||||
LoggingDriver string
|
||||
CgroupDriver string
|
||||
NEventsListener int
|
||||
KernelVersion string
|
||||
OperatingSystem string
|
||||
@@ -223,8 +227,6 @@ type Info struct {
|
||||
Architecture string
|
||||
IndexServerAddress string
|
||||
RegistryConfig *registry.ServiceConfig
|
||||
InitSha1 string
|
||||
InitPath string
|
||||
NCPU int
|
||||
MemTotal int64
|
||||
DockerRootDir string
|
||||
@@ -239,8 +241,8 @@ type Info struct {
|
||||
ClusterAdvertise string
|
||||
}
|
||||
|
||||
// PluginsInfo is temp struct holds Plugins name
|
||||
// registered with docker daemon. It used by Info struct
|
||||
// PluginsInfo is a temp struct holding Plugins name
|
||||
// registered with docker daemon. It is used by Info struct
|
||||
type PluginsInfo struct {
|
||||
// List of Volume plugins registered
|
||||
Volume []string
|
||||
@@ -388,7 +390,9 @@ type NetworkResource struct {
|
||||
ID string `json:"Id"`
|
||||
Scope string
|
||||
Driver string
|
||||
EnableIPv6 bool
|
||||
IPAM network.IPAM
|
||||
Internal bool
|
||||
Containers map[string]EndpointResource
|
||||
Options map[string]string
|
||||
}
|
||||
@@ -407,6 +411,7 @@ type NetworkCreate struct {
|
||||
Name string
|
||||
CheckDuplicate bool
|
||||
Driver string
|
||||
EnableIPv6 bool
|
||||
IPAM network.IPAM
|
||||
Internal bool
|
||||
Options map[string]string
|
||||
|
||||
21 vendor/github.com/gorilla/context/.travis.yml generated vendored
@@ -1,8 +1,19 @@
|
||||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- tip
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.3
|
||||
- go: 1.4
|
||||
- go: 1.5
|
||||
- go: 1.6
|
||||
- go: tip
|
||||
|
||||
install:
|
||||
- go get golang.org/x/tools/cmd/vet
|
||||
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go tool vet .
|
||||
- go test -v -race ./...
|
||||
|
||||
18 vendor/github.com/gorilla/mux/.travis.yml generated vendored
@@ -1,14 +1,20 @@
|
||||
language: go
|
||||
sudo: false
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.2
|
||||
- go: 1.3
|
||||
- go: 1.4
|
||||
- go: 1.5
|
||||
- go: 1.6
|
||||
- go: tip
|
||||
|
||||
install:
|
||||
- go get golang.org/x/tools/cmd/vet
|
||||
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d -s .)
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go tool vet .
|
||||
- go test -v -race ./...
|
||||
|
||||
2 vendor/github.com/gorilla/mux/README.md generated vendored
@@ -3,6 +3,8 @@ mux
|
||||
[](https://godoc.org/github.com/gorilla/mux)
|
||||
[](https://travis-ci.org/gorilla/mux)
|
||||
|
||||
http://www.gorillatoolkit.org/pkg/mux
|
||||
|
||||
Package `gorilla/mux` implements a request router and dispatcher.
|
||||
|
||||
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
|
||||
|
||||
2 vendor/github.com/gorilla/mux/doc.go generated vendored
@@ -3,7 +3,7 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package gorilla/mux implements a request router and dispatcher.
|
||||
Package mux implements a request router and dispatcher.
|
||||
|
||||
The name mux stands for "HTTP request multiplexer". Like the standard
|
||||
http.ServeMux, mux.Router matches incoming requests against a list of
|
||||
|
||||
2 vendor/github.com/gorilla/mux/mux.go generated vendored
@@ -236,7 +236,7 @@ func (r *Router) Schemes(schemes ...string) *Route {
|
||||
return r.NewRoute().Schemes(schemes...)
|
||||
}
|
||||
|
||||
// BuildVars registers a new route with a custom function for modifying
|
||||
// BuildVarsFunc registers a new route with a custom function for modifying
|
||||
// route variables before building a URL.
|
||||
func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
|
||||
return r.NewRoute().BuildVarsFunc(f)
|
||||
|
||||
78 vendor/github.com/gorilla/mux/regexp.go generated vendored
@@ -73,14 +73,17 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash
|
||||
tpl[idxs[i]:end])
|
||||
}
|
||||
// Build the regexp pattern.
|
||||
varIdx := i / 2
|
||||
fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt)
|
||||
if patt[0] == '(' && patt[len(patt)-1] == ')' {
|
||||
fmt.Fprintf(pattern, "%s%s", regexp.QuoteMeta(raw), patt)
|
||||
} else {
|
||||
fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt)
|
||||
}
|
||||
// Build the reverse template.
|
||||
fmt.Fprintf(reverse, "%s%%s", raw)
|
||||
|
||||
// Append variable name and compiled pattern.
|
||||
varsN[varIdx] = name
|
||||
varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
|
||||
varsN[i/2] = name
|
||||
varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -148,10 +151,11 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
|
||||
if !r.matchHost {
|
||||
if r.matchQuery {
|
||||
return r.matchQueryString(req)
|
||||
} else {
|
||||
return r.regexp.MatchString(req.URL.Path)
|
||||
}
|
||||
|
||||
return r.regexp.MatchString(req.URL.Path)
|
||||
}
|
||||
|
||||
return r.regexp.MatchString(getHost(req))
|
||||
}
|
||||
|
||||
@@ -181,10 +185,10 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
// getUrlQuery returns a single query parameter from a request URL.
|
||||
// getURLQuery returns a single query parameter from a request URL.
|
||||
// For a URL with foo=bar&baz=ding, we return only the relevant key
|
||||
// value pair for the routeRegexp.
|
||||
func (r *routeRegexp) getUrlQuery(req *http.Request) string {
|
||||
func (r *routeRegexp) getURLQuery(req *http.Request) string {
|
||||
if !r.matchQuery {
|
||||
return ""
|
||||
}
|
||||
@@ -198,14 +202,14 @@ func (r *routeRegexp) getUrlQuery(req *http.Request) string {
|
||||
}
|
||||
|
||||
func (r *routeRegexp) matchQueryString(req *http.Request) bool {
|
||||
return r.regexp.MatchString(r.getUrlQuery(req))
|
||||
return r.regexp.MatchString(r.getURLQuery(req))
|
||||
}
|
||||
|
||||
// braceIndices returns the first level curly brace indices from a string.
|
||||
// It returns an error in case of unbalanced braces.
|
||||
func braceIndices(s string) ([]int, error) {
|
||||
var level, idx int
|
||||
idxs := make([]int, 0)
|
||||
var idxs []int
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '{':
|
||||
@@ -246,30 +250,17 @@ type routeRegexpGroup struct {
|
||||
func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
|
||||
// Store host variables.
|
||||
if v.host != nil {
|
||||
hostVars := v.host.regexp.FindStringSubmatch(getHost(req))
|
||||
if hostVars != nil {
|
||||
subexpNames := v.host.regexp.SubexpNames()
|
||||
varName := 0
|
||||
for i, name := range subexpNames[1:] {
|
||||
if name != "" && name == varGroupName(varName) {
|
||||
m.Vars[v.host.varsN[varName]] = hostVars[i+1]
|
||||
varName++
|
||||
}
|
||||
}
|
||||
host := getHost(req)
|
||||
matches := v.host.regexp.FindStringSubmatchIndex(host)
|
||||
if len(matches) > 0 {
|
||||
extractVars(host, matches, v.host.varsN, m.Vars)
|
||||
}
|
||||
}
|
||||
// Store path variables.
|
||||
if v.path != nil {
|
||||
pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path)
|
||||
if pathVars != nil {
|
||||
subexpNames := v.path.regexp.SubexpNames()
|
||||
varName := 0
|
||||
for i, name := range subexpNames[1:] {
|
||||
if name != "" && name == varGroupName(varName) {
|
||||
m.Vars[v.path.varsN[varName]] = pathVars[i+1]
|
||||
varName++
|
||||
}
|
||||
}
|
||||
matches := v.path.regexp.FindStringSubmatchIndex(req.URL.Path)
|
||||
if len(matches) > 0 {
|
||||
extractVars(req.URL.Path, matches, v.path.varsN, m.Vars)
|
||||
// Check if we should redirect.
|
||||
if v.path.strictSlash {
|
||||
p1 := strings.HasSuffix(req.URL.Path, "/")
|
||||
@@ -288,16 +279,10 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
|
||||
}
|
||||
// Store query string variables.
|
||||
for _, q := range v.queries {
|
||||
queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req))
|
||||
if queryVars != nil {
|
||||
subexpNames := q.regexp.SubexpNames()
|
||||
varName := 0
|
||||
for i, name := range subexpNames[1:] {
|
||||
if name != "" && name == varGroupName(varName) {
|
||||
m.Vars[q.varsN[varName]] = queryVars[i+1]
|
||||
varName++
|
||||
}
|
||||
}
|
||||
queryURL := q.getURLQuery(req)
|
||||
matches := q.regexp.FindStringSubmatchIndex(queryURL)
|
||||
if len(matches) > 0 {
|
||||
extractVars(queryURL, matches, q.varsN, m.Vars)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -315,3 +300,16 @@ func getHost(r *http.Request) string {
|
||||
return host
|
||||
|
||||
}
|
||||
|
||||
func extractVars(input string, matches []int, names []string, output map[string]string) {
|
||||
matchesCount := 0
|
||||
prevEnd := -1
|
||||
for i := 2; i < len(matches) && matchesCount < len(names); i += 2 {
|
||||
if prevEnd < matches[i+1] {
|
||||
value := input[matches[i]:matches[i+1]]
|
||||
output[names[matchesCount]] = value
|
||||
prevEnd = matches[i+1]
|
||||
matchesCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
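The new extractVars helper above replaces the per-submatch SubexpNames bookkeeping: it walks the index pairs returned by FindStringSubmatchIndex and assigns each capture group, in order, to the next route variable name. A small self-contained sketch of the same idea; the route pattern and variable names below are invented for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

// extractVarsSketch mirrors the extractVars function added in the diff:
// matches holds start/end index pairs from FindStringSubmatchIndex, and each
// capture group is mapped, in order, onto the corresponding variable name.
func extractVarsSketch(input string, matches []int, names []string, output map[string]string) {
	matchesCount := 0
	prevEnd := -1
	for i := 2; i < len(matches) && matchesCount < len(names); i += 2 {
		if prevEnd < matches[i+1] {
			output[names[matchesCount]] = input[matches[i]:matches[i+1]]
			prevEnd = matches[i+1]
			matchesCount++
		}
	}
}

func main() {
	re := regexp.MustCompile(`^/articles/([^/]+)/([0-9]+)$`)
	path := "/articles/technology/42"
	vars := map[string]string{}
	extractVarsSketch(path, re.FindStringSubmatchIndex(path), []string{"category", "id"}, vars)
	fmt.Println(vars) // map[category:technology id:42]
}
```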
6 vendor/github.com/gorilla/mux/route.go generated vendored
@@ -217,8 +217,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
|
||||
return matchMapWithRegex(m, r.Header, true)
|
||||
}
|
||||
|
||||
// Regular expressions can be used with headers as well.
|
||||
// It accepts a sequence of key/value pairs, where the value has regex support. For example
|
||||
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
|
||||
// support. For example:
|
||||
//
|
||||
// r := mux.NewRouter()
|
||||
// r.HeadersRegexp("Content-Type", "application/(text|json)",
|
||||
// "X-Requested-With", "XMLHttpRequest")
|
||||
@@ -263,6 +264,7 @@ func (r *Route) Host(tpl string) *Route {
|
||||
// MatcherFunc is the function signature used by custom matchers.
|
||||
type MatcherFunc func(*http.Request, *RouteMatch) bool
|
||||
|
||||
// Match returns the match for a given request.
|
||||
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
|
||||
return m(r, match)
|
||||
}
|
||||
|
||||
39 vendor/github.com/vbatts/tar-split/LICENSE generated vendored
@@ -1,28 +1,19 @@
|
||||
Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA
|
||||
|
||||
All rights reserved.
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
11 vendor/github.com/vbatts/tar-split/archive/tar/common.go generated vendored
@@ -327,14 +327,3 @@ func toASCII(s string) string {
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
switch flag {
|
||||
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
699 vendor/github.com/vbatts/tar-split/archive/tar/reader.go generated vendored
@@ -12,7 +12,6 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -40,10 +39,6 @@ type Reader struct {
|
||||
rawBytes *bytes.Buffer // last raw bits
|
||||
}
|
||||
|
||||
type parser struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
|
||||
// This includes the header and padding.
|
||||
//
|
||||
@@ -75,36 +70,12 @@ type regFileReader struct {
|
||||
nb int64 // number of unread bytes for current file entry
|
||||
}
|
||||
|
||||
// A sparseFileReader is a numBytesReader for reading sparse file data from a
|
||||
// tar archive.
|
||||
// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
|
||||
type sparseFileReader struct {
|
||||
rfr numBytesReader // Reads the sparse-encoded file data
|
||||
sp []sparseEntry // The sparse map for the file
|
||||
pos int64 // Keeps track of file position
|
||||
total int64 // Total size of the file
|
||||
}
|
||||
|
||||
// A sparseEntry holds a single entry in a sparse file's sparse map.
|
||||
//
|
||||
// Sparse files are represented using a series of sparseEntrys.
|
||||
// Despite the name, a sparseEntry represents an actual data fragment that
|
||||
// references data found in the underlying archive stream. All regions not
|
||||
// covered by a sparseEntry are logically filled with zeros.
|
||||
//
|
||||
// For example, if the underlying raw file contains the 10-byte data:
|
||||
// var compactData = "abcdefgh"
|
||||
//
|
||||
// And the sparse map has the following entries:
|
||||
// var sp = []sparseEntry{
|
||||
// {offset: 2, numBytes: 5} // Data fragment for [2..7]
|
||||
// {offset: 18, numBytes: 3} // Data fragment for [18..21]
|
||||
// }
|
||||
//
|
||||
// Then the content of the resulting sparse file with a "real" size of 25 is:
|
||||
// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
|
||||
type sparseEntry struct {
|
||||
offset int64 // Starting position of the fragment
|
||||
numBytes int64 // Length of the fragment
|
||||
rfr *regFileReader // reads the sparse-encoded file data
|
||||
sp []sparseEntry // the sparse map for the file
|
||||
pos int64 // keeps track of file position
|
||||
tot int64 // total size of the file
|
||||
}
|
||||
|
||||
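The sparseEntry comment earlier in this hunk walks through how a sparse map plus compact data yields the logical file: regions not covered by an entry are zero-filled holes. A short sketch, not part of the vendored reader, that expands the same worked example:

```go
package main

import "fmt"

type sparseHole struct{ offset, numBytes int64 }

// expandSparseSketch rebuilds the logical file content from the compact data
// and the sparse map: each entry copies a fragment to its offset, and the
// rest of the output stays zero.
func expandSparseSketch(compact []byte, sp []sparseHole, realSize int64) []byte {
	out := make([]byte, realSize) // zero-filled by default
	read := int64(0)
	for _, s := range sp {
		copy(out[s.offset:s.offset+s.numBytes], compact[read:read+s.numBytes])
		read += s.numBytes
	}
	return out
}

func main() {
	compact := []byte("abcdefgh")
	sp := []sparseHole{{offset: 2, numBytes: 5}, {offset: 18, numBytes: 3}}
	// Result: 2 zeros + "abcde" + 11 zeros + "fgh" + 4 zeros (25 bytes).
	fmt.Printf("%q\n", expandSparseSketch(compact, sp, 25))
}
```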
// Keywords for GNU sparse files in a PAX extended header
|
||||
@@ -138,6 +109,7 @@ func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
|
||||
//
|
||||
// io.EOF is returned at the end of the input.
|
||||
func (tr *Reader) Next() (*Header, error) {
|
||||
var hdr *Header
|
||||
if tr.RawAccounting {
|
||||
if tr.rawBytes == nil {
|
||||
tr.rawBytes = bytes.NewBuffer(nil)
|
||||
@@ -145,88 +117,98 @@ func (tr *Reader) Next() (*Header, error) {
|
||||
tr.rawBytes.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
if tr.err != nil {
|
||||
return nil, tr.err
|
||||
if tr.err == nil {
|
||||
tr.skipUnread()
|
||||
}
|
||||
|
||||
var hdr *Header
|
||||
var extHdrs map[string]string
|
||||
|
||||
// Externally, Next iterates through the tar archive as if it is a series of
|
||||
// files. Internally, the tar format often uses fake "files" to add meta
|
||||
// data that describes the next file. These meta data "files" should not
|
||||
// normally be visible to the outside. As such, this loop iterates through
|
||||
// one or more "header files" until it finds a "normal file".
|
||||
loop:
|
||||
for {
|
||||
tr.err = tr.skipUnread()
|
||||
if tr.err != nil {
|
||||
return hdr, tr.err
|
||||
}
|
||||
hdr = tr.readHeader()
|
||||
if hdr == nil {
|
||||
return hdr, tr.err
|
||||
}
|
||||
// Check for PAX/GNU header.
|
||||
switch hdr.Typeflag {
|
||||
case TypeXHeader:
|
||||
// PAX extended header
|
||||
headers, err := parsePAX(tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// We actually read the whole file,
|
||||
// but this skips alignment padding
|
||||
tr.skipUnread()
|
||||
if tr.err != nil {
|
||||
return nil, tr.err
|
||||
}
|
||||
|
||||
hdr = tr.readHeader()
|
||||
if tr.err != nil {
|
||||
if hdr == nil {
|
||||
return nil, tr.err
|
||||
}
|
||||
// Check for PAX/GNU special headers and files.
|
||||
switch hdr.Typeflag {
|
||||
case TypeXHeader:
|
||||
extHdrs, tr.err = parsePAX(tr)
|
||||
if tr.err != nil {
|
||||
return nil, tr.err
|
||||
}
|
||||
continue loop // This is a meta header affecting the next header
|
||||
case TypeGNULongName, TypeGNULongLink:
|
||||
var realname []byte
|
||||
realname, tr.err = ioutil.ReadAll(tr)
|
||||
if tr.err != nil {
|
||||
return nil, tr.err
|
||||
}
|
||||
mergePAX(hdr, headers)
|
||||
|
||||
if tr.RawAccounting {
|
||||
if _, tr.err = tr.rawBytes.Write(realname); tr.err != nil {
|
||||
return nil, tr.err
|
||||
}
|
||||
}
|
||||
|
||||
// Convert GNU extensions to use PAX headers.
|
||||
if extHdrs == nil {
|
||||
extHdrs = make(map[string]string)
|
||||
}
|
||||
var p parser
|
||||
switch hdr.Typeflag {
|
||||
case TypeGNULongName:
|
||||
extHdrs[paxPath] = p.parseString(realname)
|
||||
case TypeGNULongLink:
|
||||
extHdrs[paxLinkpath] = p.parseString(realname)
|
||||
}
|
||||
if p.err != nil {
|
||||
tr.err = p.err
|
||||
return nil, tr.err
|
||||
}
|
||||
continue loop // This is a meta header affecting the next header
|
||||
default:
|
||||
mergePAX(hdr, extHdrs)
|
||||
|
||||
// Check for a PAX format sparse file
|
||||
sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
|
||||
if err != nil {
|
||||
tr.err = err
|
||||
// Check for a PAX format sparse file
|
||||
sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
|
||||
if err != nil {
|
||||
tr.err = err
|
||||
return nil, err
|
||||
}
|
||||
if sp != nil {
|
||||
// Current file is a PAX format GNU sparse file.
|
||||
// Set the current file reader to a sparse file reader.
|
||||
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
|
||||
}
|
||||
return hdr, nil
|
||||
case TypeGNULongName:
|
||||
// We have a GNU long name header. Its contents are the real file name.
|
||||
realname, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf []byte
|
||||
if tr.RawAccounting {
|
||||
if _, err = tr.rawBytes.Write(realname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sp != nil {
|
||||
// Current file is a PAX format GNU sparse file.
|
||||
// Set the current file reader to a sparse file reader.
|
||||
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
|
||||
if tr.err != nil {
|
||||
return nil, tr.err
|
||||
}
|
||||
}
|
||||
break loop // This is a file, so stop
|
||||
buf = make([]byte, tr.rawBytes.Len())
|
||||
copy(buf[:], tr.RawBytes())
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
// since the above call to Next() resets the buffer, we need to throw the bytes over
|
||||
if tr.RawAccounting {
|
||||
buf = append(buf, tr.RawBytes()...)
|
||||
if _, err = tr.rawBytes.Write(buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
hdr.Name = cString(realname)
|
||||
return hdr, err
|
||||
case TypeGNULongLink:
|
||||
// We have a GNU long link header.
|
||||
realname, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf []byte
|
||||
if tr.RawAccounting {
|
||||
if _, err = tr.rawBytes.Write(realname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf = make([]byte, tr.rawBytes.Len())
|
||||
copy(buf[:], tr.RawBytes())
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
// since the above call to Next() resets the buffer, we need to throw the bytes over
|
||||
if tr.RawAccounting {
|
||||
buf = append(buf, tr.RawBytes()...)
|
||||
if _, err = tr.rawBytes.Write(buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
hdr.Linkname = cString(realname)
|
||||
return hdr, err
|
||||
}
|
||||
return hdr, nil
|
||||
return hdr, tr.err
|
||||
}
|
||||
|
||||
// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
|
||||
@@ -403,7 +385,6 @@ func parsePAX(r io.Reader) (map[string]string, error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
sbuf := string(buf)
|
||||
|
||||
// For GNU PAX sparse format 0.0 support.
|
||||
// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
|
||||
@@ -412,17 +393,35 @@ func parsePAX(r io.Reader) (map[string]string, error) {
|
||||
headers := make(map[string]string)
|
||||
// Each record is constructed as
|
||||
// "%d %s=%s\n", length, keyword, value
|
||||
for len(sbuf) > 0 {
|
||||
key, value, residual, err := parsePAXRecord(sbuf)
|
||||
if err != nil {
|
||||
for len(buf) > 0 {
|
||||
// or the header was empty to start with.
|
||||
var sp int
|
||||
// The size field ends at the first space.
|
||||
sp = bytes.IndexByte(buf, ' ')
|
||||
if sp == -1 {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
sbuf = residual
|
||||
// Parse the first token as a decimal integer.
|
||||
n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
|
||||
if err != nil || n < 5 || int64(len(buf)) < n {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
// Extract everything between the decimal and the n -1 on the
|
||||
// beginning to eat the ' ', -1 on the end to skip the newline.
|
||||
var record []byte
|
||||
record, buf = buf[sp+1:n-1], buf[n:]
|
||||
// The first equals is guaranteed to mark the end of the key.
|
||||
// Everything else is value.
|
||||
eq := bytes.IndexByte(record, '=')
|
||||
if eq == -1 {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
key, value := record[:eq], record[eq+1:]
|
||||
|
||||
keyStr := string(key)
|
||||
if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
|
||||
// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
|
||||
sparseMap.WriteString(value)
|
||||
sparseMap.Write(value)
|
||||
sparseMap.Write([]byte{','})
|
||||
} else {
|
||||
// Normal key. Set the value in the headers map.
|
||||
@@ -437,42 +436,9 @@ func parsePAX(r io.Reader) (map[string]string, error) {
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
// parsePAXRecord parses the input PAX record string into a key-value pair.
|
||||
// If parsing is successful, it will slice off the currently read record and
|
||||
// return the remainder as r.
|
||||
//
|
||||
// A PAX record is of the following form:
|
||||
// "%d %s=%s\n" % (size, key, value)
|
||||
func parsePAXRecord(s string) (k, v, r string, err error) {
|
||||
// The size field ends at the first space.
|
||||
sp := strings.IndexByte(s, ' ')
|
||||
if sp == -1 {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
|
||||
// Parse the first token as a decimal integer.
|
||||
n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
|
||||
if perr != nil || n < 5 || int64(len(s)) < n {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
|
||||
// Extract everything between the space and the final newline.
|
||||
rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
|
||||
if nl != "\n" {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
|
||||
// The first equals separates the key from the value.
|
||||
eq := strings.IndexByte(rec, '=')
|
||||
if eq == -1 {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
return rec[:eq], rec[eq+1:], rem, nil
|
||||
}
|
||||
|
||||
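parsePAXRecord above splits a single extended-header record of the form "%d %s=%s\n" (length, keyword, value). A stand-alone sketch of the same parsing, with errors simplified to plain fmt errors instead of the package's ErrHeader:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePAXRecordSketch parses one PAX record and returns the key, the value
// and whatever input remains, in the spirit of parsePAXRecord in the diff above.
func parsePAXRecordSketch(s string) (key, value, rest string, err error) {
	sp := strings.IndexByte(s, ' ')
	if sp == -1 {
		return "", "", s, fmt.Errorf("malformed PAX record")
	}
	// The first token is the total record length, including itself.
	n, perr := strconv.ParseInt(s[:sp], 10, 0)
	if perr != nil || n < 5 || int64(len(s)) < n {
		return "", "", s, fmt.Errorf("malformed PAX record")
	}
	rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
	if nl != "\n" {
		return "", "", s, fmt.Errorf("malformed PAX record")
	}
	eq := strings.IndexByte(rec, '=')
	if eq == -1 {
		return "", "", s, fmt.Errorf("malformed PAX record")
	}
	return rec[:eq], rec[eq+1:], rem, nil
}

func main() {
	k, v, rest, _ := parsePAXRecordSketch("30 mtime=1350244992.023960108\n")
	fmt.Printf("%q %q %q\n", k, v, rest) // "mtime" "1350244992.023960108" ""
}
```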
// parseString parses bytes as a NUL-terminated C-style string.
|
||||
// cString parses bytes as a NUL-terminated C-style string.
|
||||
// If a NUL byte is not found then the whole slice is returned as a string.
|
||||
func (*parser) parseString(b []byte) string {
|
||||
func cString(b []byte) string {
|
||||
n := 0
|
||||
for n < len(b) && b[n] != 0 {
|
||||
n++
|
||||
@@ -480,51 +446,19 @@ func (*parser) parseString(b []byte) string {
|
||||
return string(b[0:n])
|
||||
}
|
||||
|
||||
// parseNumeric parses the input as being encoded in either base-256 or octal.
|
||||
// This function may return negative numbers.
|
||||
// If parsing fails or an integer overflow occurs, err will be set.
|
||||
func (p *parser) parseNumeric(b []byte) int64 {
|
||||
// Check for base-256 (binary) format first.
|
||||
// If the first bit is set, then all following bits constitute a two's
|
||||
// complement encoded number in big-endian byte order.
|
||||
func (tr *Reader) octal(b []byte) int64 {
|
||||
// Check for binary format first.
|
||||
if len(b) > 0 && b[0]&0x80 != 0 {
|
||||
// Handling negative numbers relies on the following identity:
|
||||
// -a-1 == ^a
|
||||
//
|
||||
// If the number is negative, we use an inversion mask to invert the
|
||||
// data bytes and treat the value as an unsigned number.
|
||||
var inv byte // 0x00 if positive or zero, 0xff if negative
|
||||
if b[0]&0x40 != 0 {
|
||||
inv = 0xff
|
||||
}
|
||||
|
||||
var x uint64
|
||||
var x int64
|
||||
for i, c := range b {
|
||||
c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
|
||||
if i == 0 {
|
||||
c &= 0x7f // Ignore signal bit in first byte
|
||||
c &= 0x7f // ignore signal bit in first byte
|
||||
}
|
||||
if (x >> 56) > 0 {
|
||||
p.err = ErrHeader // Integer overflow
|
||||
return 0
|
||||
}
|
||||
x = x<<8 | uint64(c)
|
||||
x = x<<8 | int64(c)
|
||||
}
|
||||
if (x >> 63) > 0 {
|
||||
p.err = ErrHeader // Integer overflow
|
||||
return 0
|
||||
}
|
||||
if inv == 0xff {
|
||||
return ^int64(x)
|
||||
}
|
||||
return int64(x)
|
||||
return x
|
||||
}
|
||||
|
||||
// Normal case is base-8 (octal) format.
|
||||
return p.parseOctal(b)
|
||||
}
|
||||
|
||||
func (p *parser) parseOctal(b []byte) int64 {
|
||||
// Because unused fields are filled with NULs, we need
|
||||
// to skip leading NULs. Fields may also be padded with
|
||||
// spaces or NULs.
|
||||
@@ -535,55 +469,27 @@ func (p *parser) parseOctal(b []byte) int64 {
|
||||
if len(b) == 0 {
|
||||
return 0
|
||||
}
|
||||
x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
|
||||
if perr != nil {
|
||||
p.err = ErrHeader
|
||||
x, err := strconv.ParseUint(cString(b), 8, 64)
|
||||
if err != nil {
|
||||
tr.err = err
|
||||
}
|
||||
return int64(x)
|
||||
}
|
||||
|
||||
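The parseNumeric/octal change above is mostly about base-256 ("binary") numeric fields: when the top bit of the first byte is set, the remaining bits form a big-endian two's complement value. A minimal sketch of that branch, leaving out the integer-overflow checks the newer parser adds:

```go
package main

import "fmt"

// parseBase256Sketch decodes a GNU tar base-256 numeric field. A real parser
// would fall back to octal when the top bit of the first byte is clear.
func parseBase256Sketch(b []byte) int64 {
	if len(b) == 0 || b[0]&0x80 == 0 {
		return 0 // not base-256 encoded
	}
	var inv byte // 0xff if the number is negative
	if b[0]&0x40 != 0 {
		inv = 0xff
	}
	var x uint64
	for i, c := range b {
		c ^= inv // inverts c only when the number is negative
		if i == 0 {
			c &= 0x7f // drop the "binary format" flag bit
		}
		x = x<<8 | uint64(c)
	}
	if inv == 0xff {
		return ^int64(x) // relies on the identity -a-1 == ^a
	}
	return int64(x)
}

func main() {
	// A 12-byte size field encoding the value 256 in base-256 form.
	field := []byte{0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}
	fmt.Println(parseBase256Sketch(field)) // 256
}
```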
// skipUnread skips any unread bytes in the existing file entry, as well as any
|
||||
// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
|
||||
// encountered in the data portion; it is okay to hit io.EOF in the padding.
|
||||
//
|
||||
// Note that this function still works properly even when sparse files are being
|
||||
// used since numBytes returns the bytes remaining in the underlying io.Reader.
|
||||
func (tr *Reader) skipUnread() error {
|
||||
dataSkip := tr.numBytes() // Number of data bytes to skip
|
||||
totalSkip := dataSkip + tr.pad // Total number of bytes to skip
|
||||
// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
|
||||
func (tr *Reader) skipUnread() {
|
||||
nr := tr.numBytes() + tr.pad // number of bytes to skip
|
||||
tr.curr, tr.pad = nil, 0
|
||||
if tr.RawAccounting {
|
||||
_, tr.err = io.CopyN(tr.rawBytes, tr.r, totalSkip)
|
||||
return tr.err
|
||||
_, tr.err = io.CopyN(tr.rawBytes, tr.r, nr)
|
||||
return
|
||||
}
|
||||
// If possible, Seek to the last byte before the end of the data section.
|
||||
// Do this because Seek is often lazy about reporting errors; this will mask
|
||||
// the fact that the tar stream may be truncated. We can rely on the
|
||||
// io.CopyN done shortly afterwards to trigger any IO errors.
|
||||
var seekSkipped int64 // Number of bytes skipped via Seek
|
||||
if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
|
||||
// Not all io.Seeker can actually Seek. For example, os.Stdin implements
|
||||
// io.Seeker, but calling Seek always returns an error and performs
|
||||
// no action. Thus, we try an innocent seek to the current position
|
||||
// to see if Seek is really supported.
|
||||
pos1, err := sr.Seek(0, os.SEEK_CUR)
|
||||
if err == nil {
|
||||
// Seek seems supported, so perform the real Seek.
|
||||
pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
|
||||
if err != nil {
|
||||
tr.err = err
|
||||
return tr.err
|
||||
}
|
||||
seekSkipped = pos2 - pos1
|
||||
if sr, ok := tr.r.(io.Seeker); ok {
|
||||
if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var copySkipped int64 // Number of bytes skipped via CopyN
|
||||
copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
|
||||
if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
|
||||
tr.err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return tr.err
|
||||
_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
|
||||
}
|
||||
|
||||
func (tr *Reader) verifyChecksum(header []byte) bool {
|
||||
@@ -591,19 +497,11 @@ func (tr *Reader) verifyChecksum(header []byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
var p parser
|
||||
given := p.parseOctal(header[148:156])
|
||||
given := tr.octal(header[148:156])
|
||||
unsigned, signed := checksum(header)
|
||||
return p.err == nil && (given == unsigned || given == signed)
|
||||
return given == unsigned || given == signed
|
||||
}
|
||||
|
||||
// readHeader reads the next block header and assumes that the underlying reader
|
||||
// is already aligned to a block boundary.
|
||||
//
|
||||
// The err will be set to io.EOF only when one of the following occurs:
|
||||
// * Exactly 0 bytes are read and EOF is hit.
|
||||
// * Exactly 1 block of zeros is read and EOF is hit.
|
||||
// * At least 2 blocks of zeros are read.
|
||||
func (tr *Reader) readHeader() *Header {
|
||||
header := tr.hdrBuff[:]
|
||||
copy(header, zeroBlock)
|
||||
@@ -615,7 +513,7 @@ func (tr *Reader) readHeader() *Header {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil // io.EOF is okay here
|
||||
return nil
|
||||
}
|
||||
if tr.RawAccounting {
|
||||
if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
|
||||
@@ -632,7 +530,7 @@ func (tr *Reader) readHeader() *Header {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil // io.EOF is okay here
|
||||
return nil
|
||||
}
|
||||
if tr.RawAccounting {
|
||||
if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
|
||||
@@ -653,19 +551,22 @@ func (tr *Reader) readHeader() *Header {
|
||||
}
|
||||
|
||||
// Unpack
|
||||
var p parser
|
||||
hdr := new(Header)
|
||||
s := slicer(header)
|
||||
|
||||
hdr.Name = p.parseString(s.next(100))
|
||||
hdr.Mode = p.parseNumeric(s.next(8))
|
||||
hdr.Uid = int(p.parseNumeric(s.next(8)))
|
||||
hdr.Gid = int(p.parseNumeric(s.next(8)))
|
||||
hdr.Size = p.parseNumeric(s.next(12))
|
||||
hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
|
||||
hdr.Name = cString(s.next(100))
|
||||
hdr.Mode = tr.octal(s.next(8))
|
||||
hdr.Uid = int(tr.octal(s.next(8)))
|
||||
hdr.Gid = int(tr.octal(s.next(8)))
|
||||
hdr.Size = tr.octal(s.next(12))
|
||||
if hdr.Size < 0 {
|
||||
tr.err = ErrHeader
|
||||
return nil
|
||||
}
|
||||
hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
|
||||
s.next(8) // chksum
|
||||
hdr.Typeflag = s.next(1)[0]
|
||||
hdr.Linkname = p.parseString(s.next(100))
|
||||
hdr.Linkname = cString(s.next(100))
|
||||
|
||||
// The remainder of the header depends on the value of magic.
|
||||
// The original (v7) version of tar had no explicit magic field,
|
||||
@@ -685,76 +586,70 @@ func (tr *Reader) readHeader() *Header {
|
||||
|
||||
switch format {
|
||||
case "posix", "gnu", "star":
|
||||
hdr.Uname = p.parseString(s.next(32))
|
||||
hdr.Gname = p.parseString(s.next(32))
|
||||
hdr.Uname = cString(s.next(32))
|
||||
hdr.Gname = cString(s.next(32))
|
||||
devmajor := s.next(8)
|
||||
devminor := s.next(8)
|
||||
if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
|
||||
hdr.Devmajor = p.parseNumeric(devmajor)
|
||||
hdr.Devminor = p.parseNumeric(devminor)
|
||||
hdr.Devmajor = tr.octal(devmajor)
|
||||
hdr.Devminor = tr.octal(devminor)
|
||||
}
|
||||
var prefix string
|
||||
switch format {
|
||||
case "posix", "gnu":
|
||||
prefix = p.parseString(s.next(155))
|
||||
prefix = cString(s.next(155))
|
||||
case "star":
|
||||
prefix = p.parseString(s.next(131))
|
||||
hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
|
||||
hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
|
||||
prefix = cString(s.next(131))
|
||||
hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
|
||||
hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
|
||||
}
|
||||
if len(prefix) > 0 {
|
||||
hdr.Name = prefix + "/" + hdr.Name
|
||||
}
|
||||
}
|
||||
|
||||
if p.err != nil {
|
||||
tr.err = p.err
|
||||
return nil
|
||||
}
|
||||
|
||||
nb := hdr.Size
|
||||
if isHeaderOnlyType(hdr.Typeflag) {
|
||||
nb = 0
|
||||
}
|
||||
if nb < 0 {
|
||||
if tr.err != nil {
|
||||
tr.err = ErrHeader
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set the current file reader.
|
||||
// Maximum value of hdr.Size is 64 GB (12 octal digits),
|
||||
// so there's no risk of int64 overflowing.
|
||||
nb := int64(hdr.Size)
|
||||
tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
|
||||
|
||||
// Set the current file reader.
|
||||
tr.curr = ®FileReader{r: tr.r, nb: nb}
|
||||
|
||||
// Check for old GNU sparse format entry.
|
||||
if hdr.Typeflag == TypeGNUSparse {
|
||||
// Get the real size of the file.
|
||||
hdr.Size = p.parseNumeric(header[483:495])
|
||||
if p.err != nil {
|
||||
tr.err = p.err
|
||||
return nil
|
||||
}
|
||||
hdr.Size = tr.octal(header[483:495])
|
||||
|
||||
// Read the sparse map.
|
||||
sp := tr.readOldGNUSparseMap(header)
|
||||
if tr.err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Current file is a GNU sparse file. Update the current file reader.
|
||||
tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
|
||||
if tr.err != nil {
|
||||
return nil
|
||||
}
|
||||
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
|
||||
}
|
||||
|
||||
return hdr
|
||||
}
|
||||
|
||||
// A sparseEntry holds a single entry in a sparse file's sparse map.
|
||||
// A sparse entry indicates the offset and size in a sparse file of a
|
||||
// block of data.
|
||||
type sparseEntry struct {
|
||||
offset int64
|
||||
numBytes int64
|
||||
}
|
||||
|
||||
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
|
||||
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
|
||||
// then one or more extension headers are used to store the rest of the sparse map.
|
||||
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
|
||||
var p parser
|
||||
isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
|
||||
spCap := oldGNUSparseMainHeaderNumEntries
|
||||
if isExtended {
|
||||
@@ -765,10 +660,10 @@ func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
|
||||
|
||||
// Read the four entries from the main tar header
|
||||
for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
|
||||
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
|
||||
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
|
||||
if p.err != nil {
|
||||
tr.err = p.err
|
||||
offset := tr.octal(s.next(oldGNUSparseOffsetSize))
|
||||
numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
|
||||
if tr.err != nil {
|
||||
tr.err = ErrHeader
|
||||
return nil
|
||||
}
|
||||
if offset == 0 && numBytes == 0 {
|
||||
@@ -792,10 +687,10 @@ func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
|
||||
isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
|
||||
s = slicer(sparseHeader)
|
||||
for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
|
||||
offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
|
||||
numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
|
||||
if p.err != nil {
|
||||
tr.err = p.err
|
||||
offset := tr.octal(s.next(oldGNUSparseOffsetSize))
|
||||
numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
|
||||
if tr.err != nil {
|
||||
tr.err = ErrHeader
|
||||
return nil
|
||||
}
|
||||
if offset == 0 && numBytes == 0 {
|
||||
@@ -807,111 +702,134 @@ func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
|
||||
return sp
|
||||
}
|
||||
|
||||
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
|
||||
// version 1.0. The format of the sparse map consists of a series of
|
||||
// newline-terminated numeric fields. The first field is the number of entries
|
||||
// and is always present. Following this are the entries, consisting of two
|
||||
// fields (offset, numBytes). This function must stop reading at the end
|
||||
// boundary of the block containing the last newline.
|
||||
//
|
||||
// Note that the GNU manual says that numeric values should be encoded in octal
|
||||
// format. However, the GNU tar utility itself outputs these values in decimal.
|
||||
// As such, this library treats values as being encoded in decimal.
|
||||
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
|
||||
// The sparse map is stored just before the file data and padded out to the nearest block boundary.
|
||||
func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
|
||||
var cntNewline int64
|
||||
var buf bytes.Buffer
|
||||
var blk = make([]byte, blockSize)
|
||||
buf := make([]byte, 2*blockSize)
|
||||
sparseHeader := buf[:blockSize]
|
||||
|
||||
// feedTokens copies data in numBlock chunks from r into buf until there are
|
||||
// at least cnt newlines in buf. It will not read more blocks than needed.
|
||||
var feedTokens = func(cnt int64) error {
|
||||
for cntNewline < cnt {
|
||||
if _, err := io.ReadFull(r, blk); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
// readDecimal is a helper function to read a decimal integer from the sparse map
|
||||
// while making sure to read from the file in blocks of size blockSize
|
||||
readDecimal := func() (int64, error) {
|
||||
// Look for newline
|
||||
nl := bytes.IndexByte(sparseHeader, '\n')
|
||||
if nl == -1 {
|
||||
if len(sparseHeader) >= blockSize {
|
||||
// This is an error
|
||||
return 0, ErrHeader
|
||||
}
|
||||
buf.Write(blk)
|
||||
for _, c := range blk {
|
||||
if c == '\n' {
|
||||
cntNewline++
|
||||
oldLen := len(sparseHeader)
|
||||
newLen := oldLen + blockSize
|
||||
if cap(sparseHeader) < newLen {
|
||||
// There's more header, but we need to make room for the next block
|
||||
copy(buf, sparseHeader)
|
||||
sparseHeader = buf[:newLen]
|
||||
} else {
|
||||
// There's more header, and we can just reslice
|
||||
sparseHeader = sparseHeader[:newLen]
|
||||
}
|
||||
|
||||
// Now that sparseHeader is large enough, read next block
|
||||
if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// leaving this function for io.Reader makes it more testable
|
||||
if tr, ok := r.(*Reader); ok && tr.RawAccounting {
|
||||
if _, err := tr.rawBytes.Write(sparseHeader[oldLen:newLen]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Look for a newline in the new data
|
||||
nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
|
||||
if nl == -1 {
|
||||
// This is an error
|
||||
return 0, ErrHeader
|
||||
}
|
||||
nl += oldLen // We want the position from the beginning
|
||||
}
|
||||
return nil
|
||||
// Now that we've found a newline, read a number
|
||||
n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
|
||||
if err != nil {
|
||||
return 0, ErrHeader
|
||||
}
|
||||
|
||||
// Update sparseHeader to consume this number
|
||||
sparseHeader = sparseHeader[nl+1:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// nextToken gets the next token delimited by a newline. This assumes that
|
||||
// at least one newline exists in the buffer.
|
||||
var nextToken = func() string {
|
||||
cntNewline--
|
||||
tok, _ := buf.ReadString('\n')
|
||||
return tok[:len(tok)-1] // Cut off newline
|
||||
}
|
||||
|
||||
// Parse for the number of entries.
|
||||
// Use integer overflow resistant math to check this.
|
||||
if err := feedTokens(1); err != nil {
|
||||
// Read the first block
|
||||
if _, err := io.ReadFull(r, sparseHeader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
|
||||
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
|
||||
return nil, ErrHeader
|
||||
// leaving this function for io.Reader makes it more testable
|
||||
if tr, ok := r.(*Reader); ok && tr.RawAccounting {
|
||||
if _, err := tr.rawBytes.Write(sparseHeader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Parse for all member entries.
|
||||
// numEntries is trusted after this since a potential attacker must have
|
||||
// committed resources proportional to what this library used.
|
||||
if err := feedTokens(2 * numEntries); err != nil {
|
||||
// The first line contains the number of entries
|
||||
numEntries, err := readDecimal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read all the entries
|
||||
sp := make([]sparseEntry, 0, numEntries)
|
||||
for i := int64(0); i < numEntries; i++ {
|
||||
offset, err := strconv.ParseInt(nextToken(), 10, 64)
|
||||
// Read the offset
|
||||
offset, err := readDecimal()
|
||||
if err != nil {
|
||||
return nil, ErrHeader
|
||||
return nil, err
|
||||
}
|
||||
numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
|
||||
// Read numBytes
|
||||
numBytes, err := readDecimal()
|
||||
if err != nil {
|
||||
return nil, ErrHeader
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
|
||||
}
|
||||
|
||||
return sp, nil
|
||||
}
|
||||
|
||||
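readGNUSparseMap1x0 above reads a run of newline-terminated decimal fields: an entry count first, then offset/numBytes pairs. A simplified sketch of that layout which ignores the block-size alignment and RawAccounting bookkeeping the real reader needs:

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// readSparseMap10Sketch parses the GNU sparse 1.0 map from a reader:
// count, then count pairs of (offset, numBytes), each on its own line.
func readSparseMap10Sketch(r *bufio.Reader) ([][2]int64, error) {
	readDecimal := func() (int64, error) {
		line, err := r.ReadString('\n')
		if err != nil {
			return 0, err
		}
		return strconv.ParseInt(strings.TrimSuffix(line, "\n"), 10, 64)
	}
	n, err := readDecimal()
	if err != nil {
		return nil, err
	}
	sp := make([][2]int64, 0, n)
	for i := int64(0); i < n; i++ {
		off, err1 := readDecimal()
		size, err2 := readDecimal()
		if err1 != nil || err2 != nil {
			return nil, fmt.Errorf("bad sparse map")
		}
		sp = append(sp, [2]int64{off, size})
	}
	return sp, nil
}

func main() {
	m := "2\n2\n5\n18\n3\n"
	sp, _ := readSparseMap10Sketch(bufio.NewReader(strings.NewReader(m)))
	fmt.Println(sp) // [[2 5] [18 3]]
}
```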
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
|
||||
// version 0.1. The sparse map is stored in the PAX headers.
|
||||
func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
|
||||
// Get number of entries.
|
||||
// Use integer overflow resistant math to check this.
|
||||
numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
|
||||
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
|
||||
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
|
||||
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
|
||||
// The sparse map is stored in the PAX headers.
|
||||
func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
|
||||
// Get number of entries
|
||||
numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
|
||||
if !ok {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
|
||||
if err != nil {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
|
||||
// There should be two numbers in sparseMap for each entry.
|
||||
sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
|
||||
sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
|
||||
|
||||
// There should be two numbers in sparseMap for each entry
|
||||
if int64(len(sparseMap)) != 2*numEntries {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
|
||||
// Loop through the entries in the sparse map.
|
||||
// numEntries is trusted now.
|
||||
// Loop through the entries in the sparse map
|
||||
sp := make([]sparseEntry, 0, numEntries)
|
||||
for i := int64(0); i < numEntries; i++ {
|
||||
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
|
||||
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
|
||||
if err != nil {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
|
||||
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
|
||||
if err != nil {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
|
||||
}
|
||||
|
||||
return sp, nil
|
||||
}
|
||||
|
||||
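A note on the int(2*numEntries) < int(numEntries) guard that appears in both versions above: for any valid non-negative count, doubling it can never produce a smaller value, so the comparison only becomes true when the multiplication wrapped around the native int. A minimal standalone Go sketch of the same check (parseEntryCount and the sample values are ours, not part of the vendored file):

package main

import (
    "fmt"
    "strconv"
)

// parseEntryCount mirrors the guard above: parse the count as a native int,
// then reject negative values and values whose doubling would overflow.
func parseEntryCount(s string) (int64, error) {
    n, err := strconv.ParseInt(s, 10, 0)
    if err != nil || n < 0 || int(2*n) < int(n) {
        return 0, fmt.Errorf("invalid sparse entry count %q", s)
    }
    return n, nil
}

func main() {
    fmt.Println(parseEntryCount("3"))                   // accepted
    fmt.Println(parseEntryCount("4611686018427387904")) // 2^62 on 64-bit: doubling wraps, rejected
}

The count is only trusted after the reader has forced the producer to commit input proportional to it, which is what the feedTokens(2 * numEntries) call above enforces.
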
@@ -928,18 +846,10 @@ func (tr *Reader) numBytes() int64 {
// Read reads from the current entry in the tar archive.
// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
//
// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (n int, err error) {
if tr.err != nil {
return 0, tr.err
}
if tr.curr == nil {
return 0, io.EOF
}

n, err = tr.curr.Read(b)
if err != nil && err != io.EOF {
tr.err = err
@@ -969,33 +879,9 @@ func (rfr *regFileReader) numBytes() int64 {
return rfr.nb
}

// newSparseFileReader creates a new sparseFileReader, but validates all of the
// sparse entries before doing so.
func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
if total < 0 {
return nil, ErrHeader // Total size cannot be negative
}

// Validate all sparse entries. These are the same checks as performed by
// the BSD tar utility.
for i, s := range sp {
switch {
case s.offset < 0 || s.numBytes < 0:
return nil, ErrHeader // Negative values are never okay
case s.offset > math.MaxInt64-s.numBytes:
return nil, ErrHeader // Integer overflow with large length
case s.offset+s.numBytes > total:
return nil, ErrHeader // Region extends beyond the "real" size
case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
return nil, ErrHeader // Regions can't overlap and must be in order
}
}
return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
}

// readHole reads a sparse hole ending at endOffset.
func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
n64 := endOffset - sfr.pos
// readHole reads a sparse file hole ending at offset toOffset
func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
n64 := toOffset - sfr.pos
if n64 > int64(len(b)) {
n64 = int64(len(b))
}
@@ -1009,54 +895,49 @@ func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {

// Read reads the sparse file data in expanded form.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
// Skip past all empty fragments.
for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
sfr.sp = sfr.sp[1:]
}

// If there are no more fragments, then it is possible that there
// is one last sparse hole.
if len(sfr.sp) == 0 {
// This behavior matches the BSD tar utility.
// However, GNU tar stops returning data even if sfr.total is unmet.
if sfr.pos < sfr.total {
return sfr.readHole(b, sfr.total), nil
// No more data fragments to read from.
if sfr.pos < sfr.tot {
// We're in the last hole
n = sfr.readHole(b, sfr.tot)
return
}
// Otherwise, we're at the end of the file
return 0, io.EOF
}

// In front of a data fragment, so read a hole.
if sfr.tot < sfr.sp[0].offset {
return 0, io.ErrUnexpectedEOF
}
if sfr.pos < sfr.sp[0].offset {
return sfr.readHole(b, sfr.sp[0].offset), nil
// We're in a hole
n = sfr.readHole(b, sfr.sp[0].offset)
return
}

// In a data fragment, so read from it.
// This math is overflow free since we verify that offset and numBytes can
// be safely added when creating the sparseFileReader.
endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
bytesLeft := endPos - sfr.pos // Bytes left in fragment
// We're not in a hole, so we'll read from the next data fragment
posInFragment := sfr.pos - sfr.sp[0].offset
bytesLeft := sfr.sp[0].numBytes - posInFragment
if int64(len(b)) > bytesLeft {
b = b[:bytesLeft]
b = b[0:bytesLeft]
}

n, err = sfr.rfr.Read(b)
sfr.pos += int64(n)
if err == io.EOF {
if sfr.pos < endPos {
err = io.ErrUnexpectedEOF // There was supposed to be more data
} else if sfr.pos < sfr.total {
err = nil // There is still an implicit sparse hole at the end
}

if int64(n) == bytesLeft {
// We're done with this fragment
sfr.sp = sfr.sp[1:]
}

if sfr.pos == endPos {
sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
if err == io.EOF && sfr.pos < sfr.tot {
// We reached the end of the last fragment's data, but there's a final hole
err = nil
}
return n, err
return
}

// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
func (sfr *sparseFileReader) numBytes() int64 {
return sfr.rfr.numBytes()
return sfr.rfr.nb
}

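For orientation, the sparse reader above walks the file as alternating data fragments and holes: each sparseEntry marks a data region starting at offset and running for numBytes, and everything between regions (plus anything after the last region, up to the total expanded size) reads back as zeros. A rough standalone sketch of that layout, with all names local to the example rather than taken from the package:

package main

import "fmt"

type sparseEntry struct {
    offset   int64
    numBytes int64
}

// regions prints the hole/data layout of a sparse file with the given total
// (expanded) size, assuming the entries are sorted and non-overlapping, which
// is what the validation in newSparseFileReader above enforces.
func regions(sp []sparseEntry, total int64) {
    pos := int64(0)
    for _, s := range sp {
        if pos < s.offset {
            fmt.Printf("hole  [%d, %d)\n", pos, s.offset)
        }
        fmt.Printf("data  [%d, %d)\n", s.offset, s.offset+s.numBytes)
        pos = s.offset + s.numBytes
    }
    if pos < total {
        fmt.Printf("hole  [%d, %d)\n", pos, total) // trailing hole, the case Read handles explicitly
    }
}

func main() {
    regions([]sparseEntry{{offset: 5, numBytes: 3}, {offset: 20, numBytes: 4}}, 32)
}

For the example map this prints a hole before each fragment and a trailing hole from 24 to 32, which is the situation the Read method above covers by returning readHole output instead of EOF.
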
222
vendor/github.com/vbatts/tar-split/archive/tar/writer.go
generated
vendored
@@ -12,8 +12,8 @@ import (
"errors"
"fmt"
"io"
"os"
"path"
"sort"
"strconv"
"strings"
"time"
@@ -23,6 +23,7 @@ var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errNameTooLong = errors.New("archive/tar: name too long")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)

@@ -42,10 +43,6 @@ type Writer struct {
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}

type formatter struct {
err error // Last error seen
}

// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }

@@ -72,9 +69,17 @@ func (tw *Writer) Flush() error {
}

// Write s into b, terminating it with a NUL if there is room.
func (f *formatter) formatString(b []byte, s string) {
// If the value is too long for the field and allowPax is true add a paxheader record instead
func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
if len(s) > len(b) {
f.err = ErrFieldTooLong
if tw.err == nil {
tw.err = ErrFieldTooLong
}
return
}
ascii := toASCII(s)
@@ -85,40 +90,40 @@ func (f *formatter) formatString(b []byte, s string) {
}

// Encode x as an octal ASCII string and write it into b with leading zeros.
func (f *formatter) formatOctal(b []byte, x int64) {
func (tw *Writer) octal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
f.formatString(b, s)
tw.cString(b, s, false, paxNone, nil)
}

// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
var binBits = uint(n-1) * 8
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
}

// Write x into b, as binary (GNUtar/star extension).
func (f *formatter) formatNumeric(b []byte, x int64) {
if fitsInBase256(len(b), x) {
for i := len(b) - 1; i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // Highest bit indicates binary format
// Write x into b, either as octal or as binary (GNUtar/star extension).
// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
tw.octal(b, x)
return
}

f.formatOctal(b, 0) // Last resort, just write zero
f.err = ErrFieldTooLong
// If it is too long for octal, and pax is preferred, use a pax header
if allowPax && tw.preferPax {
tw.octal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}

// Too big: use binary (big-endian).
tw.usedBinary = true
for i := len(b) - 1; x > 0 && i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // highest bit indicates binary format
}

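The octal and numeric helpers above choose between two encodings for a numeric header field: zero-padded, NUL-terminated octal when the value fits, and the GNU/star binary form (big-endian bytes with the top bit of the first byte set) when it does not. A hedged, standalone illustration of that choice; encodeNumeric is our name for the sketch, not the vendored API:

package main

import (
    "fmt"
    "strconv"
)

// encodeNumeric writes x into field as octal if it fits (leaving room for a
// trailing NUL), otherwise as big-endian binary with the 0x80 marker bit set.
func encodeNumeric(field []byte, x int64) {
    s := strconv.FormatInt(x, 8)
    if len(s) < len(field) {
        for len(s)+1 < len(field) {
            s = "0" + s
        }
        copy(field, s)
        return
    }
    // Too wide for octal: binary, most significant byte first.
    for i := len(field) - 1; x > 0 && i >= 0; i-- {
        field[i] = byte(x)
        x >>= 8
    }
    field[0] |= 0x80 // marker byte indicating binary format
}

func main() {
    small := make([]byte, 8)
    encodeNumeric(small, 0644)
    fmt.Printf("octal field:  %q\n", small)

    big := make([]byte, 8)
    encodeNumeric(big, 1<<40) // needs more than 7 octal digits
    fmt.Printf("binary field: %x\n", big)
}

The two versions in the diff differ mainly in where this decision lives (a standalone formatter type versus methods on Writer) and in how an over-long value may be diverted into a PAX header, not in the on-disk format.
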
var (
@@ -157,7 +162,6 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters

var f formatter
var header []byte

// We need to select which scratch buffer to use carefully,
@@ -172,40 +176,10 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
copy(header, zeroBlock)
s := slicer(header)

// Wrappers around formatter that automatically sets paxHeaders if the
// argument extends beyond the capacity of the input byte slice.
var formatString = func(b []byte, s string, paxKeyword string) {
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
f.formatString(b, s)
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
f.formatOctal(b, x)
return
}

// If it is too long for octal, and PAX is preferred, use a PAX header.
if paxKeyword != paxNone && tw.preferPax {
f.formatOctal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}

tw.usedBinary = true
f.formatNumeric(b, x)
}

// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
pathHeaderBytes := s.next(fileNameSize)

formatString(pathHeaderBytes, hdr.Name, paxPath)
tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)

// Handle out of range ModTime carefully.
var modTime int64
@@ -213,25 +187,25 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
modTime = hdr.ModTime.Unix()
}

f.formatOctal(s.next(8), hdr.Mode) // 100:108
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
formatNumeric(s.next(12), modTime, paxNone) // 136:148 --- consider using pax for finer granularity
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
tw.octal(s.next(8), hdr.Mode) // 100:108
tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157

formatString(s.next(100), hdr.Linkname, paxLinkpath)
tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)

copy(s.next(8), []byte("ustar\x0000")) // 257:265
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
copy(s.next(8), []byte("ustar\x0000")) // 257:265
tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345

// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
prefixHeaderBytes := s.next(155)
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix

// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
@@ -241,26 +215,37 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
prefix, suffix, ok := splitUSTARPath(hdr.Name)
if ok {
// Since we can encode in USTAR format, disable PAX header.
delete(paxHeaders, paxPath)
suffix := hdr.Name
prefix := ""
if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
var err error
prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
if err == nil {
// ok we can use a ustar long name instead of pax, now correct the fields

// Update the path fields
formatString(pathHeaderBytes, suffix, paxNone)
formatString(prefixHeaderBytes, prefix, paxNone)
// remove the path field from the pax header. this will suppress the pax header
delete(paxHeaders, paxPath)

// update the path fields
tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)

// Use the ustar magic if we used ustar long names.
if len(prefix) > 0 && !tw.usedBinary {
copy(header[257:265], []byte("ustar\x00"))
}
}
}
}

// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
f.formatOctal(header[148:155], chksum) // Never fails
tw.octal(header[148:155], chksum)
header[155] = ' '

// Check if there were any formatting errors.
if f.err != nil {
tw.err = f.err
if tw.err != nil {
// problem with header; probably integer too big for a field.
return tw.err
}

@@ -285,25 +270,28 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
return tw.err
}

// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
// writeUSTARLongName splits a USTAR long name hdr.Name.
// name must be < 256 characters. errNameTooLong is returned
// if hdr.Name can't be split. The splitting heuristic
// is compatible with gnu tar.
func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
length := len(name)
if length <= fileNameSize || !isASCII(name) {
return "", "", false
} else if length > fileNamePrefixSize+1 {
if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}

i := strings.LastIndex(name[:length], "/")
nlen := len(name) - i - 1 // nlen is length of suffix
plen := i // plen is length of prefix
// nlen contains the resulting length in the name field.
// plen contains the resulting length in the prefix field.
nlen := len(name) - i - 1
plen := i
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
return "", "", false
err = errNameTooLong
return
}
return name[:i], name[i+1:], true
prefix, suffix = name[:i], name[i+1:]
return
}

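Both variants of the split above implement the same rule: cut a long path at a '/' so that the tail fits the 100-byte ustar name field and the head fits the 155-byte prefix field; if no such cut exists, the writer falls back to a PAX path record. A simplified standalone sketch under those assumptions (splitUSTAR is a local name, and the ASCII and length pre-checks done by the callers are omitted):

package main

import (
    "fmt"
    "strings"
)

const (
    fileNameSize       = 100 // ustar name field
    fileNamePrefixSize = 155 // ustar prefix field
)

// splitUSTAR cuts name at a '/' so the suffix fits the name field and the
// prefix fits the prefix field, reporting ok=false when no cut works.
func splitUSTAR(name string) (prefix, suffix string, ok bool) {
    length := len(name)
    if length == 0 {
        return "", "", false
    }
    if length > fileNamePrefixSize+1 {
        length = fileNamePrefixSize + 1
    } else if name[length-1] == '/' {
        length--
    }
    i := strings.LastIndex(name[:length], "/")
    nlen := len(name) - i - 1 // length that would land in the name field
    plen := i                 // length that would land in the prefix field
    if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
        return "", "", false
    }
    return name[:i], name[i+1:], true
}

func main() {
    long := strings.Repeat("a/", 60) + "file.txt" // 128 bytes, too long for the name field alone
    prefix, suffix, ok := splitUSTAR(long)
    fmt.Println(ok, len(prefix), len(suffix), suffix)
}

With the 128-byte example path, the cut lands just before file.txt, leaving a 119-byte prefix and an 8-byte name.
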
// writePaxHeader writes an extended pax header to the
@@ -316,11 +304,11 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
// with the current pid.
pid := os.Getpid()
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir, "PaxHeaders.0", file)
fullName := path.Join(dir,
fmt.Sprintf("PaxHeaders.%d", pid), file)

ascii := toASCII(fullName)
if len(ascii) > 100 {
@@ -330,15 +318,8 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
// Construct the body
var buf bytes.Buffer

// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)

for _, k := range keys {
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
for k, v := range paxHeaders {
fmt.Fprint(&buf, paxHeader(k+"="+v))
}

ext.Size = int64(len(buf.Bytes()))
@@ -354,18 +335,17 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
return nil
}

// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
const padding = 3 // Extra padding for ' ', '=', and '\n'
size := len(k) + len(v) + padding
// paxHeader formats a single pax record, prefixing it with the appropriate length
func paxHeader(msg string) string {
const padding = 2 // Extra padding for space and newline
size := len(msg) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s=%s\n", size, k, v)

// Final adjustment if adding size field increased the record size.
record := fmt.Sprintf("%d %s\n", size, msg)
if len(record) != size {
// Final adjustment if adding size increased
// the number of digits in size
size = len(record)
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
record = fmt.Sprintf("%d %s\n", size, msg)
}
return record
}

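The record formatter above emits the self-describing PAX syntax "<length> key=value\n", where <length> counts every byte of the record, including its own decimal digits, so the value may need one correction after the first formatting pass. A small standalone sketch of that arithmetic (paxRecord is a local name following the shape of the newer formatPAXRecord):

package main

import (
    "fmt"
    "strconv"
)

// paxRecord formats one "<length> key=value\n" record, where <length> counts
// the whole record including the digits of <length> itself.
func paxRecord(k, v string) string {
    const padding = 3 // the ' ', '=' and '\n' bytes
    size := len(k) + len(v) + padding
    size += len(strconv.Itoa(size))
    record := fmt.Sprintf("%d %s=%s\n", size, k, v)
    if len(record) != size {
        // Adding the length digits pushed the total across a digit boundary.
        size = len(record)
        record = fmt.Sprintf("%d %s=%s\n", size, k, v)
    }
    return record
}

func main() {
    fmt.Printf("%q\n", paxRecord("mtime", "1350244992.023960108")) // "30 mtime=1350244992.023960108\n"
    fmt.Printf("%q\n", paxRecord("path", "a/rather/long/path/name"))
}
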