Update vendor

This commit is contained in:
Ettore Di Giacinto
2021-03-11 17:29:15 +01:00
parent 0028dd3a92
commit f8989e464e
389 changed files with 75154 additions and 1130 deletions

vendor/github.com/docker/cli/cli/trust/trust.go generated vendored Normal file

@@ -0,0 +1,388 @@
package trust
import (
"context"
"encoding/json"
"io"
"net"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"time"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/auth/challenge"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/passphrase"
"github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
)
var (
// ReleasesRole is the role named "releases"
ReleasesRole = data.RoleName(path.Join(data.CanonicalTargetsRole.String(), "releases"))
// ActionsPullOnly defines the actions for read-only interactions with a Notary Repository
ActionsPullOnly = []string{"pull"}
// ActionsPushAndPull defines the actions for read-write interactions with a Notary Repository
ActionsPushAndPull = []string{"pull", "push"}
// NotaryServer is the endpoint serving the Notary trust server
NotaryServer = "https://notary.docker.io"
)
// GetTrustDirectory returns the base trust directory name
func GetTrustDirectory() string {
return filepath.Join(cliconfig.Dir(), "trust")
}
// certificateDirectory returns the directory containing
// TLS certificates for the given server. An error is
// returned if there was an error parsing the server string.
func certificateDirectory(server string) (string, error) {
u, err := url.Parse(server)
if err != nil {
return "", err
}
return filepath.Join(cliconfig.Dir(), "tls", u.Host), nil
}
// Server returns the base URL for the trust server.
func Server(index *registrytypes.IndexInfo) (string, error) {
if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" {
urlObj, err := url.Parse(s)
if err != nil || urlObj.Scheme != "https" {
return "", errors.Errorf("valid https URL required for trust server, got %s", s)
}
return s, nil
}
if index.Official {
return NotaryServer, nil
}
return "https://" + index.Name, nil
}
type simpleCredentialStore struct {
auth types.AuthConfig
}
func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) {
return scs.auth.Username, scs.auth.Password
}
func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string {
return scs.auth.IdentityToken
}
func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) {
}
// GetNotaryRepository returns a NotaryRepository which stores all the
// information needed to operate on a notary repository.
// It creates an HTTP transport providing authentication support.
func GetNotaryRepository(in io.Reader, out io.Writer, userAgent string, repoInfo *registry.RepositoryInfo, authConfig *types.AuthConfig, actions ...string) (client.Repository, error) {
server, err := Server(repoInfo.Index)
if err != nil {
return nil, err
}
var cfg = tlsconfig.ClientDefault()
cfg.InsecureSkipVerify = !repoInfo.Index.Secure
// Get certificate base directory
certDir, err := certificateDirectory(server)
if err != nil {
return nil, err
}
logrus.Debugf("reading certificate directory: %s", certDir)
if err := registry.ReadCertsDirectory(cfg, certDir); err != nil {
return nil, err
}
base := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: cfg,
DisableKeepAlives: true,
}
// Skip configuration headers since the request is not going to the Docker daemon
modifiers := registry.Headers(userAgent, http.Header{})
authTransport := transport.NewTransport(base, modifiers...)
pingClient := &http.Client{
Transport: authTransport,
Timeout: 5 * time.Second,
}
endpointStr := server + "/v2/"
req, err := http.NewRequest("GET", endpointStr, nil)
if err != nil {
return nil, err
}
challengeManager := challenge.NewSimpleManager()
resp, err := pingClient.Do(req)
if err != nil {
// Ignore error on ping to operate in offline mode
logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err)
} else {
defer resp.Body.Close()
// Add response to the challenge manager to parse out
// authentication header and register authentication method
if err := challengeManager.AddResponse(resp); err != nil {
return nil, err
}
}
scope := auth.RepositoryScope{
Repository: repoInfo.Name.Name(),
Actions: actions,
Class: repoInfo.Class,
}
creds := simpleCredentialStore{auth: *authConfig}
tokenHandlerOptions := auth.TokenHandlerOptions{
Transport: authTransport,
Credentials: creds,
Scopes: []auth.Scope{scope},
ClientID: registry.AuthClientID,
}
tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
basicHandler := auth.NewBasicHandler(creds)
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
tr := transport.NewTransport(base, modifiers...)
return client.NewFileCachedRepository(
GetTrustDirectory(),
data.GUN(repoInfo.Name.Name()),
server,
tr,
GetPassphraseRetriever(in, out),
trustpinning.TrustPinConfig{})
}
// GetPassphraseRetriever returns a passphrase retriever that utilizes Content Trust env vars
func GetPassphraseRetriever(in io.Reader, out io.Writer) notary.PassRetriever {
aliasMap := map[string]string{
"root": "root",
"snapshot": "repository",
"targets": "repository",
"default": "repository",
}
baseRetriever := passphrase.PromptRetrieverWithInOut(in, out, aliasMap)
env := map[string]string{
"root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
}
return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
if v := env[alias]; v != "" {
return v, numAttempts > 1, nil
}
// For non-root roles, we can also try the "default" alias if it is specified
if v := env["default"]; v != "" && alias != data.CanonicalRootRole.String() {
return v, numAttempts > 1, nil
}
return baseRetriever(keyName, alias, createNew, numAttempts)
}
}
// NotaryError formats an error message received from the notary service
func NotaryError(repoName string, err error) error {
switch err.(type) {
case *json.SyntaxError:
logrus.Debugf("Notary syntax error: %s", err)
return errors.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName)
case signed.ErrExpired:
return errors.Errorf("Error: remote repository %s out-of-date: %v", repoName, err)
case trustmanager.ErrKeyNotFound:
return errors.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err)
case storage.NetworkError:
return errors.Errorf("Error: error contacting notary server: %v", err)
case storage.ErrMetaNotFound:
return errors.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err)
case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType:
return errors.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err)
case signed.ErrNoKeys:
return errors.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err)
case signed.ErrLowVersion:
return errors.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err)
case signed.ErrRoleThreshold:
return errors.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err)
case client.ErrRepositoryNotExist:
return errors.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err)
case signed.ErrInsufficientSignatures:
return errors.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err)
}
return err
}
// GetSignableRoles returns a list of roles for which we have valid signing
// keys, given a notary repository and a target
func GetSignableRoles(repo client.Repository, target *client.Target) ([]data.RoleName, error) {
var signableRoles []data.RoleName
// translate the full key names, which includes the GUN, into just the key IDs
allCanonicalKeyIDs := make(map[string]struct{})
for fullKeyID := range repo.GetCryptoService().ListAllKeys() {
allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{}
}
allDelegationRoles, err := repo.GetDelegationRoles()
if err != nil {
return signableRoles, err
}
// if there are no delegation roles, then just try to sign it into the targets role
if len(allDelegationRoles) == 0 {
signableRoles = append(signableRoles, data.CanonicalTargetsRole)
return signableRoles, nil
}
// there are delegation roles, find every delegation role we have a key for, and
// attempt to sign into all those roles.
for _, delegationRole := range allDelegationRoles {
// We do not support signing any delegation role that isn't a direct child of the targets role.
// Also don't bother checking the keys if we can't add the target
// to this role due to path restrictions
if path.Dir(delegationRole.Name.String()) != data.CanonicalTargetsRole.String() || !delegationRole.CheckPaths(target.Name) {
continue
}
for _, canonicalKeyID := range delegationRole.KeyIDs {
if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok {
signableRoles = append(signableRoles, delegationRole.Name)
break
}
}
}
if len(signableRoles) == 0 {
return signableRoles, errors.Errorf("no valid signing keys for delegation roles")
}
return signableRoles, nil
}
// ImageRefAndAuth contains all reference information and the auth config for an image request
type ImageRefAndAuth struct {
original string
authConfig *types.AuthConfig
reference reference.Named
repoInfo *registry.RepositoryInfo
tag string
digest digest.Digest
}
// GetImageReferencesAndAuth retrieves the necessary reference and auth information for an image name
// as an ImageRefAndAuth struct
func GetImageReferencesAndAuth(ctx context.Context, rs registry.Service,
authResolver func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig,
imgName string,
) (ImageRefAndAuth, error) {
ref, err := reference.ParseNormalizedNamed(imgName)
if err != nil {
return ImageRefAndAuth{}, err
}
// Resolve the Repository name from fqn to RepositoryInfo
var repoInfo *registry.RepositoryInfo
if rs != nil {
repoInfo, err = rs.ResolveRepository(ref)
} else {
repoInfo, err = registry.ParseRepositoryInfo(ref)
}
if err != nil {
return ImageRefAndAuth{}, err
}
authConfig := authResolver(ctx, repoInfo.Index)
return ImageRefAndAuth{
original: imgName,
authConfig: &authConfig,
reference: ref,
repoInfo: repoInfo,
tag: getTag(ref),
digest: getDigest(ref),
}, nil
}
func getTag(ref reference.Named) string {
switch x := ref.(type) {
case reference.Canonical, reference.Digested:
return ""
case reference.NamedTagged:
return x.Tag()
default:
return ""
}
}
func getDigest(ref reference.Named) digest.Digest {
switch x := ref.(type) {
case reference.Canonical:
return x.Digest()
case reference.Digested:
return x.Digest()
default:
return digest.Digest("")
}
}
// AuthConfig returns the auth information (username, etc) for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) AuthConfig() *types.AuthConfig {
return imgRefAuth.authConfig
}
// Reference returns the Image reference for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Reference() reference.Named {
return imgRefAuth.reference
}
// RepoInfo returns the repository information for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) RepoInfo() *registry.RepositoryInfo {
return imgRefAuth.repoInfo
}
// Tag returns the Image tag for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Tag() string {
return imgRefAuth.tag
}
// Digest returns the Image digest for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Digest() digest.Digest {
return imgRefAuth.digest
}
// Name returns the image name used to initialize the ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Name() string {
return imgRefAuth.original
}
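
To see how these helpers compose, here is a minimal sketch (not part of this commit) that resolves an image reference and opens a read-only notary repository for it. The image name "alpine:latest", the user agent "example-ua/0.1", and the anonymous auth resolver are placeholders:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/docker/cli/cli/trust"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
)

func main() {
	// Passing a nil registry.Service makes GetImageReferencesAndAuth fall
	// back to registry.ParseRepositoryInfo.
	refAndAuth, err := trust.GetImageReferencesAndAuth(context.Background(), nil,
		func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
			return types.AuthConfig{} // anonymous; fill in credentials as needed
		}, "alpine:latest")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Read-only access: only the "pull" action is requested in the token scope.
	repo, err := trust.GetNotaryRepository(os.Stdin, os.Stdout, "example-ua/0.1",
		refAndAuth.RepoInfo(), refAndAuth.AuthConfig(), trust.ActionsPullOnly...)
	if err != nil {
		fmt.Fprintln(os.Stderr, trust.NotaryError(refAndAuth.Reference().Name(), err))
		os.Exit(1)
	}
	_ = repo // e.g. list signed targets with repo.ListTargets(trust.ReleasesRole)
}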


@@ -0,0 +1,13 @@
package metrics
import "github.com/docker/go-metrics"
const (
// NamespacePrefix is the namespace of prometheus metrics
NamespacePrefix = "registry"
)
var (
// StorageNamespace is the prometheus namespace of blob/cache related operations
StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
)
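
A hedged usage sketch (not part of this commit); the counter name and help text are invented. Within this package the go-metrics import is named metrics, and counters hang off the exported namespace:

var blobCacheHits = StorageNamespace.NewCounter("blob_cache_hits", "number of blob cache hits")

func init() {
	// Expose the namespace's collectors to the Prometheus handler.
	metrics.Register(StorageNamespace)
}

// Callers then record events with blobCacheHits.Inc(1).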

File diff suppressed because it is too large


@@ -0,0 +1,9 @@
// Package v2 describes routes, urls and the error codes used in the Docker
// Registry JSON HTTP API V2. In addition to declarations, descriptors are
// provided for routes and error codes that can be used for implementation and
// automatically generating documentation.
//
// Definitions here are considered to be locked down for the V2 registry api.
// Any changes must be considered carefully and should not proceed without a
// change proposal in docker core.
package v2


@@ -0,0 +1,136 @@
package v2
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
const errGroup = "registry.api.v2"
var (
// ErrorCodeDigestInvalid is returned when uploading a blob if the
// provided digest does not match the blob contents.
ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "DIGEST_INVALID",
Message: "provided digest did not match uploaded content",
Description: `When a blob is uploaded, the registry will check that
the content matches the digest provided by the client. The error may
include a detail structure with the key "digest", including the
invalid digest string. This error may also be returned when a manifest
includes an invalid layer digest.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
// size does not match the content length.
ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "SIZE_INVALID",
Message: "provided length did not match content length",
Description: `When a layer is uploaded, the provided size will be
checked against the uploaded content. If they do not match, this error
will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeNameInvalid is returned when the name in the manifest does not
// match the provided name.
ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NAME_INVALID",
Message: "invalid repository name",
Description: `Invalid repository name encountered either during
manifest validation or any API operation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeTagInvalid is returned when the tag in the manifest does not
// match the provided tag.
ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "TAG_INVALID",
Message: "manifest tag did not match URI",
Description: `During a manifest upload, if the tag in the manifest
does not match the uri tag, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeNameUnknown is returned when the repository name is not known.
ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NAME_UNKNOWN",
Message: "repository name not known to registry",
Description: `This is returned if the name used during an operation is
unknown to the registry.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_UNKNOWN",
Message: "manifest unknown",
Description: `This error is returned when the manifest, identified by
name and tag is unknown to the repository.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
// typically during a PUT operation. This error encompasses all errors
// encountered during manifest validation that aren't signature errors.
ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_INVALID",
Message: "manifest invalid",
Description: `During upload, manifests undergo several checks ensuring
validity. If those checks fail, this error may be returned, unless a
more specific error is included. The detail will contain information about
the failed validation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestUnverified is returned when the manifest fails
// signature verification.
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_UNVERIFIED",
Message: "manifest failed signature verification",
Description: `During manifest upload, if the manifest fails signature
verification, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
// unknown to the registry.
ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MANIFEST_BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a manifest blob is
unknown to the registry.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeBlobUnknown is returned when a blob is unknown to the
// registry. This can happen when the manifest references a nonexistent
// layer or the result is not found by a blob fetch.
ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a blob is unknown to the
registry in a specified repository. This can be returned with a
standard get or if a manifest references an unknown layer during
upload.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UPLOAD_UNKNOWN",
Message: "blob upload unknown to registry",
Description: `If a blob upload has been cancelled or was never
started, this error code may be returned.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BLOB_UPLOAD_INVALID",
Message: "blob upload invalid",
Description: `The blob upload encountered an error and can no
longer proceed.`,
HTTPStatusCode: http.StatusNotFound,
})
)
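
As a hedged sketch of the consumer side, a caller that wants to treat a missing manifest as "tag not found" can match the registered code like this (the helper name isManifestUnknown is invented):

// Sketch: distinguishing "manifest unknown" from other registry API errors.
// err is whatever a registry call returned; registry error bodies decode into
// an errcode.Errors slice of errcode.Error values.
func isManifestUnknown(err error) bool {
	if errs, ok := err.(errcode.Errors); ok {
		for _, e := range errs {
			if ec, ok := e.(errcode.Error); ok && ec.Code == ErrorCodeManifestUnknown {
				return true
			}
		}
	}
	return false
}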


@@ -0,0 +1,161 @@
package v2
import (
"fmt"
"regexp"
"strings"
"unicode"
)
var (
// according to rfc7230
reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
)
// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains
a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
// function parses only the first element of the list, which is set by the very first proxy. It returns a map
// of corresponding key-value pairs and an unparsed slice of the input string.
//
// Examples of Forwarded header values:
//
// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
//
// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
// The following are the states of the forwarded header parser. Any state can transition to a failure.
const (
// terminating state; can transition to Parameter
stateElement = iota
// terminating state; can transition to KeyValueDelimiter
stateParameter
// can transition to Value
stateKeyValueDelimiter
// can transition to one of { QuotedValue, PairEnd }
stateValue
// can transition to one of { EscapedCharacter, PairEnd }
stateQuotedValue
// can transition to one of { QuotedValue }
stateEscapedCharacter
// terminating state; can transition to one of { Parameter, Element }
statePairEnd
)
var (
parameter string
value string
parse = forwarded[:]
res = map[string]string{}
state = stateElement
)
Loop:
for {
// skip spaces unless in quoted value
if state != stateQuotedValue && state != stateEscapedCharacter {
parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
}
if len(parse) == 0 {
if state != stateElement && state != statePairEnd && state != stateParameter {
return nil, parse, fmt.Errorf("unexpected end of input")
}
// terminating
break
}
switch state {
// terminate at list element delimiter
case stateElement:
if parse[0] == ',' {
parse = parse[1:]
break Loop
}
state = stateParameter
// parse parameter (the key of key-value pair)
case stateParameter:
match := reToken.FindString(parse)
if len(match) == 0 {
return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
}
parameter = strings.ToLower(match)
parse = parse[len(match):]
state = stateKeyValueDelimiter
// parse '='
case stateKeyValueDelimiter:
if parse[0] != '=' {
return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
}
parse = parse[1:]
state = stateValue
// parse value or quoted value
case stateValue:
if parse[0] == '"' {
parse = parse[1:]
state = stateQuotedValue
} else {
value = reToken.FindString(parse)
if len(value) == 0 {
return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
}
if _, exists := res[parameter]; exists {
return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
}
res[parameter] = value
parse = parse[len(value):]
value = ""
state = statePairEnd
}
// parse a part of quoted value until the first backslash
case stateQuotedValue:
match := reQuotedValue.FindString(parse)
value += match
parse = parse[len(match):]
switch {
case len(parse) == 0:
return nil, parse, fmt.Errorf("unterminated quoted string")
case parse[0] == '"':
res[parameter] = value
value = ""
parse = parse[1:]
state = statePairEnd
case parse[0] == '\\':
parse = parse[1:]
state = stateEscapedCharacter
}
// parse escaped character in a quoted string, ignore the backslash
// transition back to QuotedValue state
case stateEscapedCharacter:
c := reEscapedCharacter.FindString(parse)
if len(c) == 0 {
return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
}
value += c
parse = parse[1:]
state = stateQuotedValue
// expect either a new key-value pair, new list or end of input
case statePairEnd:
switch parse[0] {
case ';':
parse = parse[1:]
state = stateParameter
case ',':
state = stateElement
default:
return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
}
}
}
return res, parse, nil
}
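
Concretely, feeding the parser the first example from its doc comment behaves as follows (illustrative, derived from the grammar above):

params, rest, err := parseForwardedHeader(`For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown`)
// err == nil
// params == map[string]string{"for": "192.0.2.43", "proto": "https"}
// rest still holds the elements set by later proxies:
//	For="[2001:db8:cafe::17]",For=unknown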


@@ -0,0 +1,40 @@
package v2
import "github.com/gorilla/mux"
// The following are definitions of the name under which all V2 routes are
// registered. These symbols can be used to look up a route based on the name.
const (
RouteNameBase = "base"
RouteNameManifest = "manifest"
RouteNameTags = "tags"
RouteNameBlob = "blob"
RouteNameBlobUpload = "blob-upload"
RouteNameBlobUploadChunk = "blob-upload-chunk"
RouteNameCatalog = "catalog"
)
// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
func Router() *mux.Router {
return RouterWithPrefix("")
}
// RouterWithPrefix builds a gorilla router with a configured prefix
// on all routes.
func RouterWithPrefix(prefix string) *mux.Router {
rootRouter := mux.NewRouter()
router := rootRouter
if prefix != "" {
router = router.PathPrefix(prefix).Subrouter()
}
router.StrictSlash(true)
for _, descriptor := range routeDescriptors {
router.Path(descriptor.Path).Name(descriptor.Name)
}
return rootRouter
}
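
A small sketch of the lookup pattern this enables (the base route has the static path "/v2/", so URL generation needs no parameters):

router := Router()
baseURL, err := router.GetRoute(RouteNameBase).URL()
if err != nil {
	// handle error
}
_ = baseURL.Path // "/v2/"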


@@ -0,0 +1,266 @@
package v2
import (
"fmt"
"net/http"
"net/url"
"strings"
"github.com/docker/distribution/reference"
"github.com/gorilla/mux"
)
// URLBuilder creates registry API urls from a single base endpoint. It can be
// used to create urls for use in a registry client or server.
//
// All urls will be created from the given base, including the api version.
// For example, if a root of "/foo/" is provided, urls generated will fall
// under "/foo/v2/...". Most applications will only provide a schema, host and
// port, such as "https://localhost:5000/".
type URLBuilder struct {
root *url.URL // url root (ie http://localhost/)
router *mux.Router
relative bool
}
// NewURLBuilder creates a URLBuilder with provided root url object.
func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
return &URLBuilder{
root: root,
router: Router(),
relative: relative,
}
}
// NewURLBuilderFromString works identically to NewURLBuilder except it takes
// a string argument for the root, returning an error if it is not a valid
// url.
func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
u, err := url.Parse(root)
if err != nil {
return nil, err
}
return NewURLBuilder(u, relative), nil
}
// NewURLBuilderFromRequest uses information from an *http.Request to
// construct the root url.
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
var (
scheme = "http"
host = r.Host
)
if r.TLS != nil {
scheme = "https"
} else if len(r.URL.Scheme) > 0 {
scheme = r.URL.Scheme
}
// Handle forwarded headers
// Prefer "Forwarded" header as defined by rfc7239 if given
// see https://tools.ietf.org/html/rfc7239
if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
forwardedHeader, _, err := parseForwardedHeader(forwarded)
if err == nil {
if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
scheme = fproto
}
if fhost := forwardedHeader["host"]; len(fhost) > 0 {
host = fhost
}
}
} else {
if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
scheme = forwardedProto
}
if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
// comma-separated list of hosts, to which each proxy appends the
// requested host. We want to grab the first from this comma-separated
// list.
hosts := strings.SplitN(forwardedHost, ",", 2)
host = strings.TrimSpace(hosts[0])
}
}
basePath := routeDescriptorsMap[RouteNameBase].Path
requestPath := r.URL.Path
index := strings.Index(requestPath, basePath)
u := &url.URL{
Scheme: scheme,
Host: host,
}
if index > 0 {
// N.B. index+1 is important because we want to include the trailing /
u.Path = requestPath[0 : index+1]
}
return NewURLBuilder(u, relative)
}
// BuildBaseURL constructs a base url for the API, typically just "/v2/".
func (ub *URLBuilder) BuildBaseURL() (string, error) {
route := ub.cloneRoute(RouteNameBase)
baseURL, err := route.URL()
if err != nil {
return "", err
}
return baseURL.String(), nil
}
// BuildCatalogURL constructs a url to get a catalog of repositories
func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameCatalog)
catalogURL, err := route.URL()
if err != nil {
return "", err
}
return appendValuesURL(catalogURL, values...).String(), nil
}
// BuildTagsURL constructs a url to list the tags in the named repository.
func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameTags)
tagsURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return tagsURL.String(), nil
}
// BuildManifestURL constructs a url for the manifest identified by name and
// reference. The argument reference may be either a tag or digest.
func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameManifest)
tagOrDigest := ""
switch v := ref.(type) {
case reference.Tagged:
tagOrDigest = v.Tag()
case reference.Digested:
tagOrDigest = v.Digest().String()
default:
return "", fmt.Errorf("reference must have a tag or digest")
}
manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
if err != nil {
return "", err
}
return manifestURL.String(), nil
}
// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
route := ub.cloneRoute(RouteNameBlob)
layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
if err != nil {
return "", err
}
return layerURL.String(), nil
}
// BuildBlobUploadURL constructs a url to begin a blob upload in the
// repository identified by name.
func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUpload)
uploadURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
// including any url values. This should generally not be used by clients, as
// this url is provided by server implementations during the blob upload
// process.
func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUploadChunk)
uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// cloneRoute returns a clone of the named route from the router. Routes
// must be cloned to avoid modifying them during url generation.
func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
route := new(mux.Route)
root := new(url.URL)
*route = *ub.router.GetRoute(name) // clone the route
*root = *ub.root
return clonedRoute{Route: route, root: root, relative: ub.relative}
}
type clonedRoute struct {
*mux.Route
root *url.URL
relative bool
}
func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
routeURL, err := cr.Route.URL(pairs...)
if err != nil {
return nil, err
}
if cr.relative {
return routeURL, nil
}
if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
routeURL.Path = routeURL.Path[1:]
}
url := cr.root.ResolveReference(routeURL)
url.Scheme = cr.root.Scheme
return url, nil
}
// appendValuesURL appends the parameters to the url.
func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
merged := u.Query()
for _, v := range values {
for k, vv := range v {
merged[k] = append(merged[k], vv...)
}
}
u.RawQuery = merged.Encode()
return u
}
// appendValues appends the parameters to the url. Panics if the string is not
// a url.
func appendValues(u string, values ...url.Values) string {
up, err := url.Parse(u)
if err != nil {
panic(err) // should never happen
}
return appendValuesURL(up, values...).String()
}
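
End to end, a hedged sketch of building a tags URL (the registry host and repository name are placeholders):

ub, err := NewURLBuilderFromString("https://registry.example.com", false)
if err != nil {
	// handle error
}
named, err := reference.WithName("library/alpine")
if err != nil {
	// handle error
}
tagsURL, err := ub.BuildTagsURL(named)
// tagsURL == "https://registry.example.com/v2/library/alpine/tags/list"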


@@ -0,0 +1,58 @@
package auth
import (
"net/http"
"strings"
)
// APIVersion represents a version of an API including its
// type and version number.
type APIVersion struct {
// Type refers to the name of a specific API specification
// such as "registry"
Type string
// Version is the version of the API specification implemented,
// This may omit the revision number and only include
// the major and minor version, such as "2.0"
Version string
}
// String returns the string formatted API Version
func (v APIVersion) String() string {
return v.Type + "/" + v.Version
}
// APIVersions gets the API versions out of an HTTP response using the provided
// version header as the key for the HTTP header.
func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
versions := []APIVersion{}
if versionHeader != "" {
for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
for _, version := range strings.Fields(supportedVersions) {
versions = append(versions, ParseAPIVersion(version))
}
}
}
return versions
}
// ParseAPIVersion parses an API version string into an APIVersion
// Format (Expected, not enforced):
// API version string = <API type> '/' <API version>
// API type = [a-z][a-z0-9]*
// API version = [0-9]+(\.[0-9]+)?
// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
func ParseAPIVersion(versionStr string) APIVersion {
idx := strings.IndexRune(versionStr, '/')
if idx == -1 {
return APIVersion{
Type: "unknown",
Version: versionStr,
}
}
return APIVersion{
Type: strings.ToLower(versionStr[:idx]),
Version: versionStr[idx+1:],
}
}
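
For instance, the Docker registry advertises its API version in the Docker-Distribution-API-Version header; a sketch of reading it from a ping response:

// resp is an *http.Response from a GET to the registry's /v2/ endpoint.
versions := APIVersions(resp, "Docker-Distribution-API-Version")
// a header of "Docker-Distribution-API-Version: registry/2.0" yields
// []APIVersion{{Type: "registry", Version: "2.0"}}, so
// versions[0].String() == "registry/2.0"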


@@ -0,0 +1,530 @@
package auth
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/docker/distribution/registry/client"
"github.com/docker/distribution/registry/client/auth/challenge"
"github.com/docker/distribution/registry/client/transport"
)
var (
// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
// basic auth due to lack of credentials.
ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
// ErrNoToken is returned if a request is successful but the body does not
// contain an authorization token.
ErrNoToken = errors.New("authorization server did not include a token in the response")
)
const defaultClientID = "registry-client"
// AuthenticationHandler is an interface for authorizing a request from
// params from a "WWW-Authenticate" header for a single scheme.
type AuthenticationHandler interface {
// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
Scheme() string
// AuthorizeRequest adds the authorization header to a request (if needed)
// using the parameters from "WWW-Authenticate" method. The parameters
// values depend on the scheme.
AuthorizeRequest(req *http.Request, params map[string]string) error
}
// CredentialStore is an interface for getting credentials for
// a given URL
type CredentialStore interface {
// Basic returns basic auth for the given URL
Basic(*url.URL) (string, string)
// RefreshToken returns a refresh token for the
// given URL and service
RefreshToken(*url.URL, string) string
// SetRefreshToken sets the refresh token if none
// is provided for the given url and service
SetRefreshToken(realm *url.URL, service, token string)
}
// NewAuthorizer creates an authorizer which can handle multiple authentication
// schemes. The handlers are tried in order, the higher priority authentication
// methods should be first. The challenge manager holds a list of challenges for
// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier {
return &endpointAuthorizer{
challenges: manager,
handlers: handlers,
}
}
type endpointAuthorizer struct {
challenges challenge.Manager
handlers []AuthenticationHandler
}
func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
pingPath := req.URL.Path
if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 {
pingPath = pingPath[:v2Root+4]
} else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 {
pingPath = pingPath[:v1Root] + "/v2/"
} else {
return nil
}
ping := url.URL{
Host: req.URL.Host,
Scheme: req.URL.Scheme,
Path: pingPath,
}
challenges, err := ea.challenges.GetChallenges(ping)
if err != nil {
return err
}
if len(challenges) > 0 {
for _, handler := range ea.handlers {
for _, c := range challenges {
if c.Scheme != handler.Scheme() {
continue
}
if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
return err
}
}
}
}
return nil
}
// This is the minimum duration a token can last (in seconds).
// A token must not live less than 60 seconds because older versions
// of the Docker client didn't read their expiration from the token
// response and assumed 60 seconds. So to remain compatible with
// those implementations, a token must live at least this long.
const minimumTokenLifetimeSeconds = 60
// Private interface for time used by this package to enable tests to provide their own implementation.
type clock interface {
Now() time.Time
}
type tokenHandler struct {
creds CredentialStore
transport http.RoundTripper
clock clock
offlineAccess bool
forceOAuth bool
clientID string
scopes []Scope
tokenLock sync.Mutex
tokenCache string
tokenExpiration time.Time
logger Logger
}
// Scope is a type which is serializable to a string
// using the allow scope grammar.
type Scope interface {
String() string
}
// RepositoryScope represents a token scope for access
// to a repository.
type RepositoryScope struct {
Repository string
Class string
Actions []string
}
// String returns the string representation of the repository
// using the scope grammar
func (rs RepositoryScope) String() string {
repoType := "repository"
// Keep existing format for image class to maintain backwards compatibility
// with authorization servers which do not support the expanded grammar.
if rs.Class != "" && rs.Class != "image" {
repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
}
return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
}
// RegistryScope represents a token scope for access
// to resources in the registry.
type RegistryScope struct {
Name string
Actions []string
}
// String returns the string representation of the user
// using the scope grammar
func (rs RegistryScope) String() string {
return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
}
// Logger defines the injectable logging interface, used on TokenHandlers.
type Logger interface {
Debugf(format string, args ...interface{})
}
func logDebugf(logger Logger, format string, args ...interface{}) {
if logger == nil {
return
}
logger.Debugf(format, args...)
}
// TokenHandlerOptions is used to configure a new token handler
type TokenHandlerOptions struct {
Transport http.RoundTripper
Credentials CredentialStore
OfflineAccess bool
ForceOAuth bool
ClientID string
Scopes []Scope
Logger Logger
}
// An implementation of clock for providing real time data.
type realClock struct{}
// Now implements clock
func (realClock) Now() time.Time { return time.Now() }
// NewTokenHandler creates a new AuthenticationHandler which supports
// fetching tokens from a remote token server.
func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
// Create options...
return NewTokenHandlerWithOptions(TokenHandlerOptions{
Transport: transport,
Credentials: creds,
Scopes: []Scope{
RepositoryScope{
Repository: scope,
Actions: actions,
},
},
})
}
// NewTokenHandlerWithOptions creates a new token handler using the provided
// options structure.
func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
handler := &tokenHandler{
transport: options.Transport,
creds: options.Credentials,
offlineAccess: options.OfflineAccess,
forceOAuth: options.ForceOAuth,
clientID: options.ClientID,
scopes: options.Scopes,
clock: realClock{},
logger: options.Logger,
}
return handler
}
func (th *tokenHandler) client() *http.Client {
return &http.Client{
Transport: th.transport,
Timeout: 15 * time.Second,
}
}
func (th *tokenHandler) Scheme() string {
return "bearer"
}
func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
var additionalScopes []string
if fromParam := req.URL.Query().Get("from"); fromParam != "" {
additionalScopes = append(additionalScopes, RepositoryScope{
Repository: fromParam,
Actions: []string{"pull"},
}.String())
}
token, err := th.getToken(params, additionalScopes...)
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
return nil
}
func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) {
th.tokenLock.Lock()
defer th.tokenLock.Unlock()
scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
for _, scope := range th.scopes {
scopes = append(scopes, scope.String())
}
var addedScopes bool
for _, scope := range additionalScopes {
if hasScope(scopes, scope) {
continue
}
scopes = append(scopes, scope)
addedScopes = true
}
now := th.clock.Now()
if now.After(th.tokenExpiration) || addedScopes {
token, expiration, err := th.fetchToken(params, scopes)
if err != nil {
return "", err
}
// do not update cache for added scope tokens
if !addedScopes {
th.tokenCache = token
th.tokenExpiration = expiration
}
return token, nil
}
return th.tokenCache, nil
}
func hasScope(scopes []string, scope string) bool {
for _, s := range scopes {
if s == scope {
return true
}
}
return false
}
type postTokenResponse struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
Scope string `json:"scope"`
}
func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
form := url.Values{}
form.Set("scope", strings.Join(scopes, " "))
form.Set("service", service)
clientID := th.clientID
if clientID == "" {
// Use default client, this is a required field
clientID = defaultClientID
}
form.Set("client_id", clientID)
if refreshToken != "" {
form.Set("grant_type", "refresh_token")
form.Set("refresh_token", refreshToken)
} else if th.creds != nil {
form.Set("grant_type", "password")
username, password := th.creds.Basic(realm)
form.Set("username", username)
form.Set("password", password)
// attempt to get a refresh token
form.Set("access_type", "offline")
} else {
// refuse to do oauth without a grant type
return "", time.Time{}, fmt.Errorf("no supported grant type")
}
resp, err := th.client().PostForm(realm.String(), form)
if err != nil {
return "", time.Time{}, err
}
defer resp.Body.Close()
if !client.SuccessStatus(resp.StatusCode) {
err := client.HandleErrorResponse(resp)
return "", time.Time{}, err
}
decoder := json.NewDecoder(resp.Body)
var tr postTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
}
if tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
}
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
// The default/minimum lifetime.
tr.ExpiresIn = minimumTokenLifetimeSeconds
logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
}
if tr.IssuedAt.IsZero() {
// issued_at is optional in the token response.
tr.IssuedAt = th.clock.Now().UTC()
}
return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
type getTokenResponse struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
RefreshToken string `json:"refresh_token"`
}
func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
req, err := http.NewRequest("GET", realm.String(), nil)
if err != nil {
return "", time.Time{}, err
}
reqParams := req.URL.Query()
if service != "" {
reqParams.Add("service", service)
}
for _, scope := range scopes {
reqParams.Add("scope", scope)
}
if th.offlineAccess {
reqParams.Add("offline_token", "true")
clientID := th.clientID
if clientID == "" {
clientID = defaultClientID
}
reqParams.Add("client_id", clientID)
}
if th.creds != nil {
username, password := th.creds.Basic(realm)
if username != "" && password != "" {
reqParams.Add("account", username)
req.SetBasicAuth(username, password)
}
}
req.URL.RawQuery = reqParams.Encode()
resp, err := th.client().Do(req)
if err != nil {
return "", time.Time{}, err
}
defer resp.Body.Close()
if !client.SuccessStatus(resp.StatusCode) {
err := client.HandleErrorResponse(resp)
return "", time.Time{}, err
}
decoder := json.NewDecoder(resp.Body)
var tr getTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
}
if tr.RefreshToken != "" && th.creds != nil {
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
}
// `access_token` is equivalent to `token` and if both are specified
// the choice is undefined. Canonicalize `access_token` by sticking
// things in `token`.
if tr.AccessToken != "" {
tr.Token = tr.AccessToken
}
if tr.Token == "" {
return "", time.Time{}, ErrNoToken
}
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
// The default/minimum lifetime.
tr.ExpiresIn = minimumTokenLifetimeSeconds
logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
}
if tr.IssuedAt.IsZero() {
// issued_at is optional in the token response.
tr.IssuedAt = th.clock.Now().UTC()
}
return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
realm, ok := params["realm"]
if !ok {
return "", time.Time{}, errors.New("no realm specified for token auth challenge")
}
// TODO(dmcgowan): Handle empty scheme and relative realm
realmURL, err := url.Parse(realm)
if err != nil {
return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
}
service := params["service"]
var refreshToken string
if th.creds != nil {
refreshToken = th.creds.RefreshToken(realmURL, service)
}
if refreshToken != "" || th.forceOAuth {
return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
}
return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
}
type basicHandler struct {
creds CredentialStore
}
// NewBasicHandler creates a new authentication handler which adds
// basic authentication credentials to a request.
func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
return &basicHandler{
creds: creds,
}
}
func (*basicHandler) Scheme() string {
return "basic"
}
func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
if bh.creds != nil {
username, password := bh.creds.Basic(req.URL)
if username != "" && password != "" {
req.SetBasicAuth(username, password)
return nil
}
}
return ErrNoBasicAuthCredentials
}
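
A hedged sketch of wiring these pieces together, mirroring the pattern used in trust.go above; the staticCreds type, the repository name, and the credentials are invented for the example:

type staticCreds struct {
	username, password string
}

func (c staticCreds) Basic(*url.URL) (string, string)          { return c.username, c.password }
func (c staticCreds) RefreshToken(*url.URL, string) string     { return "" }
func (c staticCreds) SetRefreshToken(*url.URL, string, string) {}

// newAuthedTransport tries bearer token auth first, then falls back to basic.
func newAuthedTransport(base http.RoundTripper, mgr challenge.Manager) http.RoundTripper {
	creds := staticCreds{username: "user", password: "secret"}
	return transport.NewTransport(base, NewAuthorizer(mgr,
		NewTokenHandler(base, creds, "library/alpine", "pull"),
		NewBasicHandler(creds)))
}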


@@ -0,0 +1,162 @@
package client
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"time"
"github.com/docker/distribution"
)
type httpBlobUpload struct {
statter distribution.BlobStatter
client *http.Client
uuid string
startedAt time.Time
location string // always the last value of the location header.
offset int64
closed bool
}
func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
panic("Not implemented")
}
func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUploadUnknown
}
return HandleErrorResponse(resp)
}
func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
if err != nil {
return 0, err
}
defer req.Body.Close()
resp, err := hbu.client.Do(req)
if err != nil {
return 0, err
}
if !SuccessStatus(resp.StatusCode) {
return 0, hbu.handleErrorResponse(resp)
}
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
if err != nil {
return 0, err
}
rng := resp.Header.Get("Range")
var start, end int64
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
return 0, err
} else if n != 2 || end < start {
return 0, fmt.Errorf("bad range format: %s", rng)
}
return (end - start + 1), nil
}
func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
if err != nil {
return 0, err
}
req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
req.Header.Set("Content-Type", "application/octet-stream")
resp, err := hbu.client.Do(req)
if err != nil {
return 0, err
}
if !SuccessStatus(resp.StatusCode) {
return 0, hbu.handleErrorResponse(resp)
}
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
if err != nil {
return 0, err
}
rng := resp.Header.Get("Range")
var start, end int
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
return 0, err
} else if n != 2 || end < start {
return 0, fmt.Errorf("bad range format: %s", rng)
}
return (end - start + 1), nil
}
func (hbu *httpBlobUpload) Size() int64 {
return hbu.offset
}
func (hbu *httpBlobUpload) ID() string {
return hbu.uuid
}
func (hbu *httpBlobUpload) StartedAt() time.Time {
return hbu.startedAt
}
func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
// TODO(dmcgowan): Check if already finished, if so just fetch
req, err := http.NewRequest("PUT", hbu.location, nil)
if err != nil {
return distribution.Descriptor{}, err
}
values := req.URL.Query()
values.Set("digest", desc.Digest.String())
req.URL.RawQuery = values.Encode()
resp, err := hbu.client.Do(req)
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
if !SuccessStatus(resp.StatusCode) {
return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
}
return hbu.statter.Stat(ctx, desc.Digest)
}
func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
req, err := http.NewRequest("DELETE", hbu.location, nil)
if err != nil {
return err
}
resp, err := hbu.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
return nil
}
return hbu.handleErrorResponse(resp)
}
func (hbu *httpBlobUpload) Close() error {
hbu.closed = true
return nil
}


@@ -0,0 +1,139 @@
package client
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/client/auth/challenge"
)
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
// errcode.Errors slice.
var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
// returned when making a registry api call.
type UnexpectedHTTPStatusError struct {
Status string
}
func (e *UnexpectedHTTPStatusError) Error() string {
return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
}
// UnexpectedHTTPResponseError is returned when an expected HTTP status code
// is returned, but the content was unexpected and failed to be parsed.
type UnexpectedHTTPResponseError struct {
ParseErr error
StatusCode int
Response []byte
}
func (e *UnexpectedHTTPResponseError) Error() string {
return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
}
func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
var errors errcode.Errors
body, err := ioutil.ReadAll(r)
if err != nil {
return err
}
// For backward compatibility, handle irregularly formatted
// messages that contain a "details" field.
var detailsErr struct {
Details string `json:"details"`
}
err = json.Unmarshal(body, &detailsErr)
if err == nil && detailsErr.Details != "" {
switch statusCode {
case http.StatusUnauthorized:
return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
case http.StatusTooManyRequests:
return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
default:
return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
}
}
if err := json.Unmarshal(body, &errors); err != nil {
return &UnexpectedHTTPResponseError{
ParseErr: err,
StatusCode: statusCode,
Response: body,
}
}
if len(errors) == 0 {
// If there was no error specified in the body, return
// UnexpectedHTTPResponseError.
return &UnexpectedHTTPResponseError{
ParseErr: ErrNoErrorsInBody,
StatusCode: statusCode,
Response: body,
}
}
return errors
}
func makeErrorList(err error) []error {
if errL, ok := err.(errcode.Errors); ok {
return []error(errL)
}
return []error{err}
}
func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}
// HandleErrorResponse returns the error parsed from an HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError is returned for response codes outside of the
// expected range.
func HandleErrorResponse(resp *http.Response) error {
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
// Check for OAuth errors within the `WWW-Authenticate` header first
// See https://tools.ietf.org/html/rfc6750#section-3
for _, c := range challenge.ResponseChallenges(resp) {
if c.Scheme == "bearer" {
var err errcode.Error
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
switch c.Parameters["error"] {
case "invalid_token":
err.Code = errcode.ErrorCodeUnauthorized
case "insufficient_scope":
err.Code = errcode.ErrorCodeDenied
default:
continue
}
if description := c.Parameters["error_description"]; description != "" {
err.Message = description
} else {
err.Message = err.Code.Message()
}
return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
}
}
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
}
return err
}
return &UnexpectedHTTPStatusError{Status: resp.Status}
}
// SuccessStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func SuccessStatus(status int) bool {
return status >= 200 && status <= 399
}
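
The intended calling pattern, used throughout this client package (httpClient and manifestURL are placeholders):

resp, err := httpClient.Get(manifestURL)
if err != nil {
	return err
}
defer resp.Body.Close()
if !SuccessStatus(resp.StatusCode) {
	// 4xx bodies are parsed into errcode.Errors; anything else becomes
	// an *UnexpectedHTTPStatusError.
	return HandleErrorResponse(resp)
}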


@@ -0,0 +1,867 @@
package client
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/distribution/registry/storage/cache"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/opencontainers/go-digest"
)
// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
type Registry interface {
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}
// checkHTTPRedirect is a callback that can manipulate redirected HTTP
// requests. It is used to preserve Accept and Range headers.
func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
if len(via) > 0 {
for headerName, headerVals := range via[0].Header {
if headerName != "Accept" && headerName != "Range" {
continue
}
for _, val := range headerVals {
// Don't add to redirected request if redirected
// request already has a header with the same
// name and value.
hasValue := false
for _, existingVal := range req.Header[headerName] {
if existingVal == val {
hasValue = true
break
}
}
if !hasValue {
req.Header.Add(headerName, val)
}
}
}
}
return nil
}
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
Timeout: 1 * time.Minute,
CheckRedirect: checkHTTPRedirect,
}
return &registry{
client: client,
ub: ub,
}, nil
}
type registry struct {
client *http.Client
ub *v2.URLBuilder
}
// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
// are no more entries.
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
var numFilled int
var returnErr error
values := buildCatalogValues(len(entries), last)
u, err := r.ub.BuildCatalogURL(values)
if err != nil {
return 0, err
}
resp, err := r.client.Get(u)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
var ctlg struct {
Repositories []string `json:"repositories"`
}
decoder := json.NewDecoder(resp.Body)
if err := decoder.Decode(&ctlg); err != nil {
return 0, err
}
for cnt := range ctlg.Repositories {
entries[cnt] = ctlg.Repositories[cnt]
}
numFilled = len(ctlg.Repositories)
link := resp.Header.Get("Link")
if link == "" {
returnErr = io.EOF
}
} else {
return 0, HandleErrorResponse(resp)
}
return numFilled, returnErr
}
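// exampleListAllRepositories is an editor's sketch (not part of the upstream
// file) showing the paging contract of Registry.Repositories: refill a
// fixed-size window, passing the last entry seen, until io.EOF is returned.
func exampleListAllRepositories(ctx context.Context, reg Registry) ([]string, error) {
var all []string
window := make([]string, 100) // the page size is the length of the slice
last := ""
for {
n, err := reg.Repositories(ctx, window, last)
all = append(all, window[:n]...)
if err == io.EOF {
return all, nil
}
if err != nil {
return all, err
}
if n == 0 {
return all, nil // defensive: avoid looping forever on an empty page
}
last = window[n-1]
}
}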
// NewRepository creates a new Repository for the given repository name and base URL.
func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
CheckRedirect: checkHTTPRedirect,
// TODO(dmcgowan): create cookie jar
}
return &repository{
client: client,
ub: ub,
name: name,
}, nil
}
type repository struct {
client *http.Client
ub *v2.URLBuilder
name reference.Named
}
func (r *repository) Named() reference.Named {
return r.name
}
func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
statter := &blobStatter{
name: r.name,
ub: r.ub,
client: r.client,
}
return &blobs{
name: r.name,
ub: r.ub,
client: r.client,
statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
}
}
func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
// todo(richardscothern): options should be sent over the wire
return &manifests{
name: r.name,
ub: r.ub,
client: r.client,
etags: make(map[string]string),
}, nil
}
func (r *repository) Tags(ctx context.Context) distribution.TagService {
return &tags{
client: r.client,
ub: r.ub,
name: r.Named(),
}
}
// tags implements remote tagging operations.
type tags struct {
client *http.Client
ub *v2.URLBuilder
name reference.Named
}
// All returns all tags
func (t *tags) All(ctx context.Context) ([]string, error) {
var tags []string
listURLStr, err := t.ub.BuildTagsURL(t.name)
if err != nil {
return tags, err
}
listURL, err := url.Parse(listURLStr)
if err != nil {
return tags, err
}
for {
resp, err := t.client.Get(listURL.String())
if err != nil {
return tags, err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return tags, err
}
tagsResponse := struct {
Tags []string `json:"tags"`
}{}
if err := json.Unmarshal(b, &tagsResponse); err != nil {
return tags, err
}
tags = append(tags, tagsResponse.Tags...)
if link := resp.Header.Get("Link"); link != "" {
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return tags, err
}
listURL = listURL.ResolveReference(linkURL)
} else {
return tags, nil
}
} else {
return tags, HandleErrorResponse(resp)
}
}
}
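// exampleListTags is an editor's sketch (not part of the upstream file):
// NewRepository with an authenticated transport is the usual entry point, and
// Tags(ctx).All follows the paginated Link headers transparently.
func exampleListTags(ctx context.Context, named reference.Named, rt http.RoundTripper) ([]string, error) {
repo, err := NewRepository(named, "https://registry-1.docker.io", rt)
if err != nil {
return nil, err
}
return repo.Tags(ctx).All(ctx)
}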
func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
desc := distribution.Descriptor{}
headers := response.Header
ctHeader := headers.Get("Content-Type")
if ctHeader == "" {
return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
}
desc.MediaType = ctHeader
digestHeader := headers.Get("Docker-Content-Digest")
if digestHeader == "" {
bytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return distribution.Descriptor{}, err
}
_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
if err != nil {
return distribution.Descriptor{}, err
}
return desc, nil
}
dgst, err := digest.Parse(digestHeader)
if err != nil {
return distribution.Descriptor{}, err
}
desc.Digest = dgst
lengthHeader := headers.Get("Content-Length")
if lengthHeader == "" {
return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
}
length, err := strconv.ParseInt(lengthHeader, 10, 64)
if err != nil {
return distribution.Descriptor{}, err
}
desc.Size = length
return desc, nil
}
// Get issues a HEAD request for a manifest against its named endpoint in order
// to construct a descriptor for the tag. If the registry doesn't support HEADing
// a manifest, it falls back to GET.
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
ref, err := reference.WithTag(t.name, tag)
if err != nil {
return distribution.Descriptor{}, err
}
u, err := t.ub.BuildManifestURL(ref)
if err != nil {
return distribution.Descriptor{}, err
}
newRequest := func(method string) (*http.Response, error) {
req, err := http.NewRequest(method, u, nil)
if err != nil {
return nil, err
}
for _, t := range distribution.ManifestMediaTypes() {
req.Header.Add("Accept", t)
}
resp, err := t.client.Do(req)
return resp, err
}
resp, err := newRequest("HEAD")
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
switch {
case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
// if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
return descriptorFromResponse(resp)
default:
// if the response is an error - there will be no body to decode.
// Issue a GET request:
// - for data from a server that does not handle HEAD
// - to get error details in case of a failure
resp, err = newRequest("GET")
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
if resp.StatusCode >= 200 && resp.StatusCode < 400 {
return descriptorFromResponse(resp)
}
return distribution.Descriptor{}, HandleErrorResponse(resp)
}
}
func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
panic("not implemented")
}
func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
panic("not implemented")
}
func (t *tags) Untag(ctx context.Context, tag string) error {
panic("not implemented")
}
type manifests struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
etags map[string]string
}
func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return false, err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return false, err
}
resp, err := ms.client.Head(u)
if err != nil {
return false, err
}
if SuccessStatus(resp.StatusCode) {
return true, nil
} else if resp.StatusCode == http.StatusNotFound {
return false, nil
}
return false, HandleErrorResponse(resp)
}
// AddEtagToTag allows a client to supply an eTag to Get which will be
// used for a conditional HTTP request. If the eTag matches, a nil manifest
// and ErrManifestNotModified error will be returned. etag is automatically
// quoted when added to this map.
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
return etagOption{tag, etag}
}
type etagOption struct{ tag, etag string }
func (o etagOption) Apply(ms distribution.ManifestService) error {
if ms, ok := ms.(*manifests); ok {
ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
return nil
}
return fmt.Errorf("etag options is a client-only option")
}
// ReturnContentDigest allows a client to set the content digest on
// a successful request from the 'Docker-Content-Digest' header. The
// returned digest represents the digest which the registry uses
// to refer to the content and can be used to delete the content.
func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
return contentDigestOption{dgst}
}
type contentDigestOption struct{ digest *digest.Digest }
func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
return nil
}
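// exampleConditionalGet is an editor's sketch (not part of the upstream
// file): it fetches a manifest by tag, reusing a previously seen ETag so an
// unchanged manifest surfaces as distribution.ErrManifestNotModified, and it
// captures the registry's canonical digest via ReturnContentDigest.
func exampleConditionalGet(ctx context.Context, ms distribution.ManifestService, tag, etag string) (distribution.Manifest, digest.Digest, error) {
var dgst digest.Digest
m, err := ms.Get(ctx, "", distribution.WithTag(tag), AddEtagToTag(tag, etag), ReturnContentDigest(&dgst))
return m, dgst, err
}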
func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
var (
digestOrTag string
ref reference.Named
err error
contentDgst *digest.Digest
mediaTypes []string
)
for _, option := range options {
switch opt := option.(type) {
case distribution.WithTagOption:
digestOrTag = opt.Tag
ref, err = reference.WithTag(ms.name, opt.Tag)
if err != nil {
return nil, err
}
case contentDigestOption:
contentDgst = opt.digest
case distribution.WithManifestMediaTypesOption:
mediaTypes = opt.MediaTypes
default:
err := option.Apply(ms)
if err != nil {
return nil, err
}
}
}
if digestOrTag == "" {
digestOrTag = dgst.String()
ref, err = reference.WithDigest(ms.name, dgst)
if err != nil {
return nil, err
}
}
if len(mediaTypes) == 0 {
mediaTypes = distribution.ManifestMediaTypes()
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return nil, err
}
for _, t := range mediaTypes {
req.Header.Add("Accept", t)
}
if _, ok := ms.etags[digestOrTag]; ok {
req.Header.Set("If-None-Match", ms.etags[digestOrTag])
}
resp, err := ms.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotModified {
return nil, distribution.ErrManifestNotModified
} else if SuccessStatus(resp.StatusCode) {
if contentDgst != nil {
dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
if err == nil {
*contentDgst = dgst
}
}
mt := resp.Header.Get("Content-Type")
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
m, _, err := distribution.UnmarshalManifest(mt, body)
if err != nil {
return nil, err
}
return m, nil
}
return nil, HandleErrorResponse(resp)
}
// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
// tag name in order to build the correct upload URL.
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
ref := ms.name
var tagged bool
for _, option := range options {
if opt, ok := option.(distribution.WithTagOption); ok {
var err error
ref, err = reference.WithTag(ref, opt.Tag)
if err != nil {
return "", err
}
tagged = true
} else {
err := option.Apply(ms)
if err != nil {
return "", err
}
}
}
mediaType, p, err := m.Payload()
if err != nil {
return "", err
}
if !tagged {
// generate a canonical digest and Put by digest
_, d, err := distribution.UnmarshalManifest(mediaType, p)
if err != nil {
return "", err
}
ref, err = reference.WithDigest(ref, d.Digest)
if err != nil {
return "", err
}
}
manifestURL, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return "", err
}
putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
if err != nil {
return "", err
}
putRequest.Header.Set("Content-Type", mediaType)
resp, err := ms.client.Do(putRequest)
if err != nil {
return "", err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
dgstHeader := resp.Header.Get("Docker-Content-Digest")
dgst, err := digest.Parse(dgstHeader)
if err != nil {
return "", err
}
return dgst, nil
}
return "", HandleErrorResponse(resp)
}
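// examplePushManifest is an editor's sketch (not part of the upstream file):
// with distribution.WithTag the manifest is PUT under that tag; without a tag
// option, Put derives the canonical digest and pushes by digest instead.
func examplePushManifest(ctx context.Context, ms distribution.ManifestService, m distribution.Manifest, tag string) (digest.Digest, error) {
if tag != "" {
return ms.Put(ctx, m, distribution.WithTag(tag))
}
return ms.Put(ctx, m)
}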
func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return err
}
req, err := http.NewRequest("DELETE", u, nil)
if err != nil {
return err
}
resp, err := ms.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
return nil
}
return HandleErrorResponse(resp)
}
// todo(richardscothern): Restore interface and implementation with merge of #1050
/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
panic("not supported")
}*/
type blobs struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
statter distribution.BlobDescriptorService
distribution.BlobDeleter
}
func sanitizeLocation(location, base string) (string, error) {
baseURL, err := url.Parse(base)
if err != nil {
return "", err
}
locationURL, err := url.Parse(location)
if err != nil {
return "", err
}
return baseURL.ResolveReference(locationURL).String(), nil
}
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return bs.statter.Stat(ctx, dgst)
}
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
reader, err := bs.Open(ctx, dgst)
if err != nil {
return nil, err
}
defer reader.Close()
return ioutil.ReadAll(reader)
}
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return nil, err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return nil, err
}
return transport.NewHTTPReadSeeker(bs.client, blobURL,
func(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUnknown
}
return HandleErrorResponse(resp)
}), nil
}
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
panic("not implemented")
}
func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
writer, err := bs.Create(ctx)
if err != nil {
return distribution.Descriptor{}, err
}
dgstr := digest.Canonical.Digester()
n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
if err != nil {
return distribution.Descriptor{}, err
}
if n < int64(len(p)) {
return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
}
desc := distribution.Descriptor{
MediaType: mediaType,
Size: int64(len(p)),
Digest: dgstr.Digest(),
}
return writer.Commit(ctx, desc)
}
type optionFunc func(interface{}) error
func (f optionFunc) Apply(v interface{}) error {
return f(v)
}
// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
return optionFunc(func(v interface{}) error {
opts, ok := v.(*distribution.CreateOptions)
if !ok {
return fmt.Errorf("unexpected options type: %T", v)
}
opts.Mount.ShouldMount = true
opts.Mount.From = ref
return nil
})
}
func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
var opts distribution.CreateOptions
for _, option := range options {
err := option.Apply(&opts)
if err != nil {
return nil, err
}
}
var values []url.Values
if opts.Mount.ShouldMount {
values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
}
u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
if err != nil {
return nil, err
}
resp, err := bs.client.Post(u, "", nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusCreated:
desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
if err != nil {
return nil, err
}
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
case http.StatusAccepted:
// TODO(dmcgowan): Check for invalid UUID
uuid := resp.Header.Get("Docker-Upload-UUID")
location, err := sanitizeLocation(resp.Header.Get("Location"), u)
if err != nil {
return nil, err
}
return &httpBlobUpload{
statter: bs.statter,
client: bs.client,
uuid: uuid,
startedAt: time.Now(),
location: location,
}, nil
default:
return nil, HandleErrorResponse(resp)
}
}
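// exampleCrossRepoMount is an editor's sketch (not part of the upstream
// file): a cross-repository mount either short-circuits with
// distribution.ErrBlobMounted (the registry already holds the content) or
// degrades to a regular upload through the returned BlobWriter.
func exampleCrossRepoMount(ctx context.Context, bs distribution.BlobStore, src reference.Canonical, p []byte) (distribution.Descriptor, error) {
w, err := bs.Create(ctx, WithMountFrom(src))
if ebm, ok := err.(distribution.ErrBlobMounted); ok {
return ebm.Descriptor, nil // mounted server-side, no bytes transferred
}
if err != nil {
return distribution.Descriptor{}, err
}
dgstr := digest.Canonical.Digester()
if _, err := io.Copy(w, io.TeeReader(bytes.NewReader(p), dgstr.Hash())); err != nil {
w.Cancel(ctx)
return distribution.Descriptor{}, err
}
return w.Commit(ctx, distribution.Descriptor{Digest: dgstr.Digest(), Size: int64(len(p))})
}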
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
panic("not implemented")
}
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
return bs.statter.Clear(ctx, dgst)
}
type blobStatter struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
}
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
u, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return distribution.Descriptor{}, err
}
resp, err := bs.client.Head(u)
if err != nil {
return distribution.Descriptor{}, err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
lengthHeader := resp.Header.Get("Content-Length")
if lengthHeader == "" {
return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
}
length, err := strconv.ParseInt(lengthHeader, 10, 64)
if err != nil {
return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
}
return distribution.Descriptor{
MediaType: resp.Header.Get("Content-Type"),
Size: length,
Digest: dgst,
}, nil
} else if resp.StatusCode == http.StatusNotFound {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
return distribution.Descriptor{}, HandleErrorResponse(resp)
}
func buildCatalogValues(maxEntries int, last string) url.Values {
values := url.Values{}
if maxEntries > 0 {
values.Add("n", strconv.Itoa(maxEntries))
}
if last != "" {
values.Add("last", last)
}
return values
}
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return err
}
req, err := http.NewRequest("DELETE", blobURL, nil)
if err != nil {
return err
}
resp, err := bs.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if SuccessStatus(resp.StatusCode) {
return nil
}
return HandleErrorResponse(resp)
}
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
return nil
}

View File

@@ -0,0 +1,250 @@
package transport
import (
"errors"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
)
var (
contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
// ErrWrongCodeForByteRange is returned if the client sends a request
// with a Range header but the server returns a 2xx or 3xx code other
// than 206 Partial Content.
ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
)
// ReadSeekCloser combines io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
// request. When seeking and starting a read from a non-zero offset,
// a "Range" header will be added which sets the offset.
// TODO(dmcgowan): Move this into a separate utility package
func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
return &httpReadSeeker{
client: client,
url: url,
errorHandler: errorHandler,
}
}
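// A minimal usage sketch (editor's note, not part of the upstream file;
// assumes the caller imports io and io/ioutil): seeking before the first Read
// makes the initial GET carry a Range header, so only the requested suffix of
// the content is transferred:
//
// rsc := NewHTTPReadSeeker(httpClient, blobURL, nil) // a nil errorHandler yields a generic status error
// defer rsc.Close()
// if _, err := rsc.Seek(offset, io.SeekStart); err != nil { /* handle */ }
// data, err := ioutil.ReadAll(rsc)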
type httpReadSeeker struct {
client *http.Client
url string
// errorHandler creates an error from an unsuccessful HTTP response.
// This allows the error to be created with the HTTP response body
// without leaking the body through a returned error.
errorHandler func(*http.Response) error
size int64
// rc is the remote read closer.
rc io.ReadCloser
// readerOffset tracks the offset as of the last read.
readerOffset int64
// seekOffset allows Seek to override the offset. Seek changes
// seekOffset instead of changing readerOffset directly so that
// connection resets can be delayed and possibly avoided if the
// seek is undone (i.e. seeking to the end and then back to the
// beginning).
seekOffset int64
err error
}
func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
if hrs.err != nil {
return 0, hrs.err
}
// If we sought to a different position, we need to reset the
// connection. This logic is here instead of Seek so that if
// a seek is undone before the next read, the connection doesn't
// need to be closed and reopened. A common example of this is
// seeking to the end to determine the length, and then seeking
// back to the original position.
if hrs.readerOffset != hrs.seekOffset {
hrs.reset()
}
hrs.readerOffset = hrs.seekOffset
rd, err := hrs.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
hrs.seekOffset += int64(n)
hrs.readerOffset += int64(n)
return n, err
}
func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
if hrs.err != nil {
return 0, hrs.err
}
lastReaderOffset := hrs.readerOffset
if whence == io.SeekStart && hrs.rc == nil {
// If no request has been made yet, and we are seeking to an
// absolute position, set the read offset as well to avoid an
// unnecessary request.
hrs.readerOffset = offset
}
_, err := hrs.reader()
if err != nil {
hrs.readerOffset = lastReaderOffset
return 0, err
}
newOffset := hrs.seekOffset
switch whence {
case io.SeekCurrent:
newOffset += offset
case io.SeekEnd:
if hrs.size < 0 {
return 0, errors.New("content length not known")
}
newOffset = hrs.size + offset
case io.SeekStart:
newOffset = offset
}
if newOffset < 0 {
err = errors.New("cannot seek to negative position")
} else {
hrs.seekOffset = newOffset
}
return hrs.seekOffset, err
}
func (hrs *httpReadSeeker) Close() error {
if hrs.err != nil {
return hrs.err
}
// close and release reader chain
if hrs.rc != nil {
hrs.rc.Close()
}
hrs.rc = nil
hrs.err = errors.New("httpLayer: closed")
return nil
}
func (hrs *httpReadSeeker) reset() {
if hrs.err != nil {
return
}
if hrs.rc != nil {
hrs.rc.Close()
hrs.rc = nil
}
}
func (hrs *httpReadSeeker) reader() (io.Reader, error) {
if hrs.err != nil {
return nil, hrs.err
}
if hrs.rc != nil {
return hrs.rc, nil
}
req, err := http.NewRequest("GET", hrs.url, nil)
if err != nil {
return nil, err
}
if hrs.readerOffset > 0 {
// If we are at different offset, issue a range request from there.
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
// TODO: get context in here
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
}
req.Header.Add("Accept-Encoding", "identity")
resp, err := hrs.client.Do(req)
if err != nil {
return nil, err
}
// Normally would use client.SuccessStatus, but that would be a cyclic
// import
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
if hrs.readerOffset > 0 {
if resp.StatusCode != http.StatusPartialContent {
return nil, ErrWrongCodeForByteRange
}
contentRange := resp.Header.Get("Content-Range")
if contentRange == "" {
return nil, errors.New("no Content-Range header found in HTTP 206 response")
}
submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
if len(submatches) < 4 {
return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
}
startByte, err := strconv.ParseUint(submatches[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
}
if startByte != uint64(hrs.readerOffset) {
return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
}
endByte, err := strconv.ParseUint(submatches[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
}
if submatches[3] == "*" {
hrs.size = -1
} else {
size, err := strconv.ParseUint(submatches[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
}
if endByte+1 != size {
return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
}
hrs.size = int64(size)
}
} else if resp.StatusCode == http.StatusOK {
hrs.size = resp.ContentLength
} else {
hrs.size = -1
}
hrs.rc = resp.Body
} else {
defer resp.Body.Close()
if hrs.errorHandler != nil {
return nil, hrs.errorHandler(resp)
}
return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
}
return hrs.rc, nil
}

View File

@@ -0,0 +1,147 @@
package transport
import (
"io"
"net/http"
"sync"
)
// RequestModifier represents an object which will do an in-place
// modification of an HTTP request.
type RequestModifier interface {
ModifyRequest(*http.Request) error
}
type headerModifier http.Header
// NewHeaderRequestModifier returns a new RequestModifier which will
// add the given headers to a request.
func NewHeaderRequestModifier(header http.Header) RequestModifier {
return headerModifier(header)
}
func (h headerModifier) ModifyRequest(req *http.Request) error {
for k, s := range http.Header(h) {
req.Header[k] = append(req.Header[k], s...)
}
return nil
}
// NewTransport creates a new transport which will apply modifiers to
// the request on a RoundTrip call.
func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
return &transport{
Modifiers: modifiers,
Base: base,
}
}
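// exampleUserAgentTransport is an editor's sketch (not part of the upstream
// file): it wraps http.DefaultTransport so every request carries an extra
// header, the same mechanism the registry client uses to attach User-Agent
// and authorization headers.
func exampleUserAgentTransport(ua string) http.RoundTripper {
hdr := http.Header{}
hdr.Set("User-Agent", ua)
return NewTransport(http.DefaultTransport, NewHeaderRequestModifier(hdr))
}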
// transport is an http.RoundTripper that makes HTTP requests after
// copying and modifying the request
type transport struct {
Modifiers []RequestModifier
Base http.RoundTripper
mu sync.Mutex // guards modReq
modReq map[*http.Request]*http.Request // original -> modified
}
// RoundTrip clones the incoming request, applies the configured
// modifiers to the clone, and forwards it to the base transport,
// leaving the caller's original request unmodified.
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
req2 := cloneRequest(req)
for _, modifier := range t.Modifiers {
if err := modifier.ModifyRequest(req2); err != nil {
return nil, err
}
}
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
if err != nil {
t.setModReq(req, nil)
return nil, err
}
res.Body = &onEOFReader{
rc: res.Body,
fn: func() { t.setModReq(req, nil) },
}
return res, nil
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
t.mu.Lock()
modReq := t.modReq[req]
delete(t.modReq, req)
t.mu.Unlock()
cr.CancelRequest(modReq)
}
}
func (t *transport) base() http.RoundTripper {
if t.Base != nil {
return t.Base
}
return http.DefaultTransport
}
func (t *transport) setModReq(orig, mod *http.Request) {
t.mu.Lock()
defer t.mu.Unlock()
if t.modReq == nil {
t.modReq = make(map[*http.Request]*http.Request)
}
if mod == nil {
delete(t.modReq, orig)
} else {
t.modReq[orig] = mod
}
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
return r2
}
type onEOFReader struct {
rc io.ReadCloser
fn func()
}
func (r *onEOFReader) Read(p []byte) (n int, err error) {
n, err = r.rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
func (r *onEOFReader) Close() error {
err := r.rc.Close()
r.runFunc()
return err
}
func (r *onEOFReader) runFunc() {
if fn := r.fn; fn != nil {
fn()
r.fn = nil
}
}

View File

@@ -0,0 +1,35 @@
// Package cache provides facilities to speed up access to the storage
// backend.
package cache
import (
"fmt"
"github.com/docker/distribution"
)
// BlobDescriptorCacheProvider provides repository scoped
// BlobDescriptorService cache instances and a global descriptor cache.
type BlobDescriptorCacheProvider interface {
distribution.BlobDescriptorService
RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
}
// ValidateDescriptor provides a helper function to ensure that caches have
// common criteria for admitting descriptors.
func ValidateDescriptor(desc distribution.Descriptor) error {
if err := desc.Digest.Validate(); err != nil {
return err
}
if desc.Size < 0 {
return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
}
if desc.MediaType == "" {
return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
}
return nil
}

View File

@@ -0,0 +1,129 @@
package cache
import (
"context"
"github.com/docker/distribution"
prometheus "github.com/docker/distribution/metrics"
"github.com/opencontainers/go-digest"
)
// Metrics is used to hold metric counters
// related to the number of times a cache was
// hit or missed.
type Metrics struct {
Requests uint64
Hits uint64
Misses uint64
}
// Logger can be provided on the MetricsTracker to log errors.
//
// Usually, this is just a proxy to dcontext.GetLogger.
type Logger interface {
Errorf(format string, args ...interface{})
}
// MetricsTracker represents a metric tracker
// which simply counts the number of hits and misses.
type MetricsTracker interface {
Hit()
Miss()
Metrics() Metrics
Logger(context.Context) Logger
}
type cachedBlobStatter struct {
cache distribution.BlobDescriptorService
backend distribution.BlobDescriptorService
tracker MetricsTracker
}
var (
// cacheCount counts the total cache requests received, as well as hits and misses
cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
)
// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
return &cachedBlobStatter{
cache: cache,
backend: backend,
}
}
// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
// falls back to a backend. Hits and misses will be sent to the tracker.
func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
return &cachedBlobStatter{
cache: cache,
backend: backend,
tracker: tracker,
}
}
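// A minimal wiring sketch (editor's note, not part of the upstream file),
// mirroring how the registry client composes this statter in
// repository.Blobs: a fast in-memory cache is consulted first, and the
// authoritative backend is only hit on a miss:
//
// statter := cache.NewCachedBlobStatter(
// memory.NewInMemoryBlobDescriptorCacheProvider(), // local cache
// remoteStatter, // authoritative backend
// )
// desc, err := statter.Stat(ctx, dgst) // a hit avoids a registry round trip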
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
cacheCount.WithValues("Request").Inc(1)
desc, err := cbds.cache.Stat(ctx, dgst)
if err != nil {
if err != distribution.ErrBlobUnknown {
logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
}
goto fallback
}
cacheCount.WithValues("Hit").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Hit()
}
return desc, nil
fallback:
cacheCount.WithValues("Miss").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Miss()
}
desc, err = cbds.backend.Stat(ctx, dgst)
if err != nil {
return desc, err
}
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
}
return desc, err
}
func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
err := cbds.cache.Clear(ctx, dgst)
if err != nil {
return err
}
err = cbds.backend.Clear(ctx, dgst)
if err != nil {
return err
}
return nil
}
func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
}
return nil
}
func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
if tracker == nil {
return
}
logger := tracker.Logger(ctx)
if logger == nil {
return
}
logger.Errorf(format, args...)
}

View File

@@ -0,0 +1,179 @@
package memory
import (
"context"
"sync"
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache"
"github.com/opencontainers/go-digest"
)
type inMemoryBlobDescriptorCacheProvider struct {
global *mapBlobDescriptorCache
repositories map[string]*mapBlobDescriptorCache
mu sync.RWMutex
}
// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
// storing blob descriptor data.
func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
return &inMemoryBlobDescriptorCacheProvider{
global: newMapBlobDescriptorCache(),
repositories: make(map[string]*mapBlobDescriptorCache),
}
}
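// exampleScopedCache is an editor's sketch (not part of the upstream file):
// RepositoryScoped validates the repository name, and descriptors written
// through the scoped cache also populate the global digest-keyed cache.
func exampleScopedCache(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
provider := NewInMemoryBlobDescriptorCacheProvider()
repoCache, err := provider.RepositoryScoped("library/ubuntu")
if err != nil {
return distribution.Descriptor{}, err
}
return repoCache.Stat(ctx, dgst) // returns distribution.ErrBlobUnknown until set
}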
func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
if _, err := reference.ParseNormalizedNamed(repo); err != nil {
return nil, err
}
imbdcp.mu.RLock()
defer imbdcp.mu.RUnlock()
return &repositoryScopedInMemoryBlobDescriptorCache{
repo: repo,
parent: imbdcp,
repository: imbdcp.repositories[repo],
}, nil
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return imbdcp.global.Stat(ctx, dgst)
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
return imbdcp.global.Clear(ctx, dgst)
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
_, err := imbdcp.Stat(ctx, dgst)
if err == distribution.ErrBlobUnknown {
if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
// if the digests differ, set the other canonical mapping
if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
return err
}
}
// unknown, just set it
return imbdcp.global.SetDescriptor(ctx, dgst, desc)
}
// we already know it, do nothing
return err
}
// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
// repository cache. Instances are not thread-safe but the delegated
// operations are.
type repositoryScopedInMemoryBlobDescriptorCache struct {
repo string
parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
repository *mapBlobDescriptorCache
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
rsimbdcp.parent.mu.Lock()
repo := rsimbdcp.repository
rsimbdcp.parent.mu.Unlock()
if repo == nil {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
return repo.Stat(ctx, dgst)
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
rsimbdcp.parent.mu.Lock()
repo := rsimbdcp.repository
rsimbdcp.parent.mu.Unlock()
if repo == nil {
return distribution.ErrBlobUnknown
}
return repo.Clear(ctx, dgst)
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
rsimbdcp.parent.mu.Lock()
repo := rsimbdcp.repository
if repo == nil {
// allocate map since we are setting it now.
var ok bool
// have to read back value since we may have allocated elsewhere.
repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
if !ok {
repo = newMapBlobDescriptorCache()
rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
}
rsimbdcp.repository = repo
}
rsimbdcp.parent.mu.Unlock()
if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
return err
}
return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
}
// mapBlobDescriptorCache provides a simple map-based implementation of the
// descriptor cache.
type mapBlobDescriptorCache struct {
descriptors map[digest.Digest]distribution.Descriptor
mu sync.RWMutex
}
var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
return &mapBlobDescriptorCache{
descriptors: make(map[digest.Digest]distribution.Descriptor),
}
}
func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return distribution.Descriptor{}, err
}
mbdc.mu.RLock()
defer mbdc.mu.RUnlock()
desc, ok := mbdc.descriptors[dgst]
if !ok {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
return desc, nil
}
func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
mbdc.mu.Lock()
defer mbdc.mu.Unlock()
delete(mbdc.descriptors, dgst)
return nil
}
func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
mbdc.mu.Lock()
defer mbdc.mu.Unlock()
mbdc.descriptors[dgst] = desc
return nil
}

126
vendor/github.com/docker/distribution/uuid/uuid.go generated vendored Normal file
View File

@@ -0,0 +1,126 @@
// Package uuid provides simple UUID generation. Only version 4 style UUIDs
// can be generated.
//
// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
package uuid
import (
"crypto/rand"
"fmt"
"io"
"os"
"syscall"
"time"
)
const (
// Bits is the number of bits in a UUID
Bits = 128
// Size is the number of bytes in a UUID
Size = Bits / 8
format = "%08x-%04x-%04x-%04x-%012x"
)
var (
// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
ErrUUIDInvalid = fmt.Errorf("invalid uuid")
// Loggerf can be used to override the default logging destination. Such
// log messages in this library should be logged at warning or higher.
Loggerf = func(format string, args ...interface{}) {}
)
// UUID represents a UUID value. UUIDs can be compared and set to other values
// and accessed by byte.
type UUID [Size]byte
// Generate creates a new, version 4 uuid.
func Generate() (u UUID) {
const (
// ensures we back off for less than 450ms total. Use the following to
// select a new value, in units of 10ms:
// n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
maxretries = 9
backoff = time.Millisecond * 10
)
var (
totalBackoff time.Duration
count int
retries int
)
for {
// This should never block but the read may fail. Because of this,
// we just try to read the random number generator until we get
// something. This is a very rare condition but may happen.
b := time.Duration(retries) * backoff
time.Sleep(b)
totalBackoff += b
n, err := io.ReadFull(rand.Reader, u[count:])
if err != nil {
if retryOnError(err) && retries < maxretries {
count += n
retries++
Loggerf("error generating version 4 uuid, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
}
break
}
u[6] = (u[6] & 0x0f) | 0x40 // set version byte
u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
return u
}
// Parse attempts to extract a uuid from the string or returns an error.
func Parse(s string) (u UUID, err error) {
if len(s) != 36 {
return UUID{}, ErrUUIDInvalid
}
// create stack addresses for each section of the uuid.
p := make([][]byte, 5)
if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
return u, err
}
copy(u[0:4], p[0])
copy(u[4:6], p[1])
copy(u[6:8], p[2])
copy(u[8:10], p[3])
copy(u[10:16], p[4])
return
}
func (u UUID) String() string {
return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
}
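// exampleRoundTrip is an editor's sketch (not part of the upstream file):
// Generate never returns an error (it panics only if the entropy source is
// persistently broken), and Parse accepts the canonical 36-character form
// produced by String.
func exampleRoundTrip() (UUID, error) {
u := Generate()
return Parse(u.String())
}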
// retryOnError tries to detect whether or not retrying would be fruitful.
func retryOnError(err error) bool {
switch err := err.(type) {
case *os.PathError:
return retryOnError(err.Err) // unpack the target error
case syscall.Errno:
if err == syscall.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under
// which we backoff and retry.
return true
}
}
return false
}

22
vendor/github.com/docker/docker/api/types/auth.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
package types // import "github.com/docker/docker/api/types"
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
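// A minimal encoding sketch (editor's note, not part of the upstream file;
// assumes the caller imports encoding/json and encoding/base64): clients
// conventionally JSON-encode an AuthConfig and base64url-encode the result to
// produce the X-Registry-Auth header value:
//
// buf, _ := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
// header := base64.URLEncoding.EncodeToString(buf)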

419
vendor/github.com/docker/docker/api/types/client.go generated vendored Normal file
View File

@@ -0,0 +1,419 @@
package types // import "github.com/docker/docker/api/types"
import (
"bufio"
"io"
"net"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
units "github.com/docker/go-units"
)
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
type CheckpointCreateOptions struct {
CheckpointID string
CheckpointDir string
Exit bool
}
// CheckpointListOptions holds parameters to list checkpoints for a container
type CheckpointListOptions struct {
CheckpointDir string
}
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
type CheckpointDeleteOptions struct {
CheckpointID string
CheckpointDir string
}
// ContainerAttachOptions holds parameters to attach to a container.
type ContainerAttachOptions struct {
Stream bool
Stdin bool
Stdout bool
Stderr bool
DetachKeys string
Logs bool
}
// ContainerCommitOptions holds parameters to commit changes into a container.
type ContainerCommitOptions struct {
Reference string
Comment string
Author string
Changes []string
Pause bool
Config *container.Config
}
// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
ExecID string `json:"ID"`
ContainerID string
Running bool
ExitCode int
Pid int
}
// ContainerListOptions holds parameters to list containers with.
type ContainerListOptions struct {
Quiet bool
Size bool
All bool
Latest bool
Since string
Before string
Limit int
Filters filters.Args
}
// ContainerLogsOptions holds parameters to filter logs with.
type ContainerLogsOptions struct {
ShowStdout bool
ShowStderr bool
Since string
Until string
Timestamps bool
Follow bool
Tail string
Details bool
}
// ContainerRemoveOptions holds parameters to remove containers.
type ContainerRemoveOptions struct {
RemoveVolumes bool
RemoveLinks bool
Force bool
}
// ContainerStartOptions holds parameters to start containers.
type ContainerStartOptions struct {
CheckpointID string
CheckpointDir string
}
// CopyToContainerOptions holds information
// about files to copy into a container
type CopyToContainerOptions struct {
AllowOverwriteDirWithFile bool
CopyUIDGID bool
}
// EventsOptions holds parameters to filter events with.
type EventsOptions struct {
Since string
Until string
Filters filters.Args
}
// NetworkListOptions holds parameters to filter the list of networks with.
type NetworkListOptions struct {
Filters filters.Args
}
// HijackedResponse holds connection information for a hijacked request.
type HijackedResponse struct {
Conn net.Conn
Reader *bufio.Reader
}
// Close closes the hijacked connection and reader.
func (h *HijackedResponse) Close() {
h.Conn.Close()
}
// CloseWriter is an interface implemented by connections that can
// close their writing side, signalling that no more data will be sent.
type CloseWriter interface {
CloseWrite() error
}
// CloseWrite closes the write side of the hijacked connection, if supported.
func (h *HijackedResponse) CloseWrite() error {
if conn, ok := h.Conn.(CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
// ImageBuildOptions holds the information
// necessary to build images.
type ImageBuildOptions struct {
Tags []string
SuppressOutput bool
RemoteContext string
NoCache bool
Remove bool
ForceRemove bool
PullParent bool
Isolation container.Isolation
CPUSetCPUs string
CPUSetMems string
CPUShares int64
CPUQuota int64
CPUPeriod int64
Memory int64
MemorySwap int64
CgroupParent string
NetworkMode string
ShmSize int64
Dockerfile string
Ulimits []*units.Ulimit
// BuildArgs needs to be a *string instead of just a string so that
// we can tell the difference between "" (empty string) and no value
// at all (nil). See the parsing of buildArgs in
// api/server/router/build/build_routes.go for even more info.
BuildArgs map[string]*string
AuthConfigs map[string]AuthConfig
Context io.Reader
Labels map[string]string
// squash the resulting image's layers to the parent
// preserves the original image and creates a new one from the parent with all
// the changes applied to a single layer
Squash bool
// CacheFrom specifies images that are used for matching cache. Images
// specified here do not need to have a valid parent chain to match cache.
CacheFrom []string
SecurityOpt []string
ExtraHosts []string // List of extra hosts
Target string
SessionID string
Platform string
// Version specifies the version of the underlying builder to use
Version BuilderVersion
// BuildID is an optional identifier that can be passed together with the
// build request. The same identifier can be used to gracefully cancel the
// build with the cancel request.
BuildID string
// Outputs defines configurations for exporting build results. Only supported
// in BuildKit mode
Outputs []ImageBuildOutput
}
// ImageBuildOutput defines configuration for exporting a build result
type ImageBuildOutput struct {
Type string
Attrs map[string]string
}
// BuilderVersion sets the version of underlying builder to use
type BuilderVersion string
const (
// BuilderV1 is the first generation builder in docker daemon
BuilderV1 BuilderVersion = "1"
// BuilderBuildKit is builder based on moby/buildkit project
BuilderBuildKit BuilderVersion = "2"
)
// ImageBuildResponse holds information
// returned by a server after building
// an image.
type ImageBuildResponse struct {
Body io.ReadCloser
OSType string
}
// ImageCreateOptions holds information to create images.
type ImageCreateOptions struct {
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
}
// ImageImportSource holds source information for ImageImport
type ImageImportSource struct {
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
}
// ImageImportOptions holds information to import images from the client host.
type ImageImportOptions struct {
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
Message string // Message is the message to tag the image with
Changes []string // Changes are the raw changes to apply to this image
Platform string // Platform is the target platform of the image
}
// ImageListOptions holds parameters to filter the list of images with.
type ImageListOptions struct {
All bool
Filters filters.Args
}
// ImageLoadResponse returns information to the client about a load process.
type ImageLoadResponse struct {
// Body must be closed to avoid a resource leak
Body io.ReadCloser
JSON bool
}
// ImagePullOptions holds information to pull images.
type ImagePullOptions struct {
All bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
PrivilegeFunc RequestPrivilegeFunc
Platform string
}
// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)
// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions
// ImageRemoveOptions holds parameters to remove images.
type ImageRemoveOptions struct {
Force bool
PruneChildren bool
}
// ImageSearchOptions holds parameters to search images with.
type ImageSearchOptions struct {
RegistryAuth string
PrivilegeFunc RequestPrivilegeFunc
Filters filters.Args
Limit int
}
// ResizeOptions holds parameters to resize a tty.
// It can be used to resize container ttys and
// exec process ttys too.
type ResizeOptions struct {
Height uint
Width uint
}
// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
Filters filters.Args
}
// NodeRemoveOptions holds parameters to remove nodes with.
type NodeRemoveOptions struct {
Force bool
}
// ServiceCreateOptions contains the options to use when creating a service.
type ServiceCreateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// ServiceCreateResponse contains the information returned to a client
// on the creation of a new service.
type ServiceCreateResponse struct {
// ID is the ID of the created service.
ID string
// Warnings is a set of non-fatal warning messages to pass on to the user.
Warnings []string `json:",omitempty"`
}
// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
RegistryAuthFromSpec = "spec"
RegistryAuthFromPreviousSpec = "previous-spec"
)
// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
// into this field. While it does open API users up to racy writes, most
// users may not need that level of consistency in practice.
// RegistryAuthFrom specifies where to find the registry authorization
// credentials if they are not given in EncodedRegistryAuth. Valid
// values are "spec" and "previous-spec".
RegistryAuthFrom string
// Rollback indicates whether a server-side rollback should be
// performed. When this is set, the provided spec will be ignored.
// The valid values are "previous" and "none". An empty value is the
// same as "none".
Rollback string
// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}
// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filters filters.Args
// Status indicates whether the server should include the service task
// count of running and desired tasks.
Status bool
}
// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
type ServiceInspectOptions struct {
InsertDefaults bool
}
// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
Filters filters.Args
}
// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
Force bool
}
// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
Timeout int
}
// PluginDisableOptions holds parameters to disable plugins.
type PluginDisableOptions struct {
Force bool
}
// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
Disabled bool
AcceptAllPermissions bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
RemoteRef string // RemoteRef is the plugin name on the registry
PrivilegeFunc RequestPrivilegeFunc
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
Args []string
}
// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// PluginCreateOptions hold all options to plugin create.
type PluginCreateOptions struct {
RepoName string
}

64
vendor/github.com/docker/docker/api/types/configs.go generated vendored Normal file
View File

@@ -0,0 +1,64 @@
package types // import "github.com/docker/docker/api/types"
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
)
// configs holds structs used for internal communication between the
// frontend (such as an http server) and the backend (such as the
// docker daemon).
// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
Name string
Config *container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
AdjustCPUShares bool
}
// ContainerRmConfig holds arguments for the container remove
// operation. This struct is used to tell the backend what operations
// to perform.
type ContainerRmConfig struct {
ForceRemove, RemoveVolume, RemoveLink bool
}
// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {
User string // User that will run the command
Privileged bool // Is the container in privileged mode
Tty bool // Attach standard streams to a tty.
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStderr bool // Attach the standard error
AttachStdout bool // Attach the standard output
Detach bool // Execute in detach mode
DetachKeys string // Escape keys for detach
Env []string // Environment variables
WorkingDir string // Working directory
Cmd []string // Execution commands and args
}
// PluginRmConfig holds arguments for plugin remove.
type PluginRmConfig struct {
ForceRemove bool
}
// PluginEnableConfig holds arguments for plugin enable
type PluginEnableConfig struct {
Timeout int
}
// PluginDisableConfig holds arguments for plugin disable.
type PluginDisableConfig struct {
ForceDisable bool
}
// NetworkListConfig stores the options available for listing networks
type NetworkListConfig struct {
// TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here
Detailed bool
Verbose bool
}

View File

@@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ErrorResponse Represents an error.
// swagger:model ErrorResponse
type ErrorResponse struct {
// The error message.
// Required: true
Message string `json:"message"`
}

View File

@@ -0,0 +1,6 @@
package types
// Error returns the error message
func (e ErrorResponse) Error() string {
return e.Message
}

View File

@@ -0,0 +1,17 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// GraphDriverData Information about a container's graph driver.
// swagger:model GraphDriverData
type GraphDriverData struct {
// data
// Required: true
Data map[string]string `json:"Data"`
// name
// Required: true
Name string `json:"Name"`
}

View File

@@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// IDResponse Response to an API call that returns just an Id
// swagger:model IdResponse
type IDResponse struct {
// The id of the newly created object.
// Required: true
ID string `json:"Id"`
}

View File

@@ -0,0 +1,15 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ImageDeleteResponseItem image delete response item
// swagger:model ImageDeleteResponseItem
type ImageDeleteResponseItem struct {
// The image ID of an image that was deleted
Deleted string `json:"Deleted,omitempty"`
// The image ID of an image that was untagged
Untagged string `json:"Untagged,omitempty"`
}

View File

@@ -0,0 +1,49 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ImageSummary image summary
// swagger:model ImageSummary
type ImageSummary struct {
// containers
// Required: true
Containers int64 `json:"Containers"`
// created
// Required: true
Created int64 `json:"Created"`
// Id
// Required: true
ID string `json:"Id"`
// labels
// Required: true
Labels map[string]string `json:"Labels"`
// parent Id
// Required: true
ParentID string `json:"ParentId"`
// repo digests
// Required: true
RepoDigests []string `json:"RepoDigests"`
// repo tags
// Required: true
RepoTags []string `json:"RepoTags"`
// shared size
// Required: true
SharedSize int64 `json:"SharedSize"`
// size
// Required: true
Size int64 `json:"Size"`
// virtual size
// Required: true
VirtualSize int64 `json:"VirtualSize"`
}

203
vendor/github.com/docker/docker/api/types/plugin.go generated vendored Normal file
View File

@@ -0,0 +1,203 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Plugin A plugin for the Engine API
// swagger:model Plugin
type Plugin struct {
// config
// Required: true
Config PluginConfig `json:"Config"`
// True if the plugin is running. False if the plugin is not running, only installed.
// Required: true
Enabled bool `json:"Enabled"`
// Id
ID string `json:"Id,omitempty"`
// name
// Required: true
Name string `json:"Name"`
// plugin remote reference used to push/pull the plugin
PluginReference string `json:"PluginReference,omitempty"`
// settings
// Required: true
Settings PluginSettings `json:"Settings"`
}
// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {
// args
// Required: true
Args PluginConfigArgs `json:"Args"`
// description
// Required: true
Description string `json:"Description"`
// Docker Version used to create the plugin
DockerVersion string `json:"DockerVersion,omitempty"`
// documentation
// Required: true
Documentation string `json:"Documentation"`
// entrypoint
// Required: true
Entrypoint []string `json:"Entrypoint"`
// env
// Required: true
Env []PluginEnv `json:"Env"`
// interface
// Required: true
Interface PluginConfigInterface `json:"Interface"`
// ipc host
// Required: true
IpcHost bool `json:"IpcHost"`
// linux
// Required: true
Linux PluginConfigLinux `json:"Linux"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
// network
// Required: true
Network PluginConfigNetwork `json:"Network"`
// pid host
// Required: true
PidHost bool `json:"PidHost"`
// propagated mount
// Required: true
PropagatedMount string `json:"PropagatedMount"`
// user
User PluginConfigUser `json:"User,omitempty"`
// work dir
// Required: true
WorkDir string `json:"WorkDir"`
// rootfs
Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}
// PluginConfigArgs plugin config args
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value []string `json:"Value"`
}
// PluginConfigInterface The interface between Docker and the plugin
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {
// Protocol to use for clients connecting to the plugin.
ProtocolScheme string `json:"ProtocolScheme,omitempty"`
// socket
// Required: true
Socket string `json:"Socket"`
// types
// Required: true
Types []PluginInterfaceType `json:"Types"`
}
// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {
// allow all devices
// Required: true
AllowAllDevices bool `json:"AllowAllDevices"`
// capabilities
// Required: true
Capabilities []string `json:"Capabilities"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
}
// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {
// type
// Required: true
Type string `json:"Type"`
}
// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {
// diff ids
DiffIds []string `json:"diff_ids"`
// type
Type string `json:"type,omitempty"`
}
// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {
// g ID
GID uint32 `json:"GID,omitempty"`
// UID
UID uint32 `json:"UID,omitempty"`
}
// PluginSettings Settings that can be modified by users.
// swagger:model PluginSettings
type PluginSettings struct {
// args
// Required: true
Args []string `json:"Args"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
// env
// Required: true
Env []string `json:"Env"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
}


@@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginDevice plugin device
// swagger:model PluginDevice
type PluginDevice struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// path
// Required: true
Path *string `json:"Path"`
// settable
// Required: true
Settable []string `json:"Settable"`
}


@@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginEnv plugin env
// swagger:model PluginEnv
type PluginEnv struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value *string `json:"Value"`
}


@@ -0,0 +1,21 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginInterfaceType plugin interface type
// swagger:model PluginInterfaceType
type PluginInterfaceType struct {
// capability
// Required: true
Capability string `json:"Capability"`
// prefix
// Required: true
Prefix string `json:"Prefix"`
// version
// Required: true
Version string `json:"Version"`
}


@@ -0,0 +1,37 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginMount plugin mount
// swagger:model PluginMount
type PluginMount struct {
// description
// Required: true
Description string `json:"Description"`
// destination
// Required: true
Destination string `json:"Destination"`
// name
// Required: true
Name string `json:"Name"`
// options
// Required: true
Options []string `json:"Options"`
// settable
// Required: true
Settable []string `json:"Settable"`
// source
// Required: true
Source *string `json:"Source"`
// type
// Required: true
Type string `json:"Type"`
}


@@ -0,0 +1,71 @@
package types // import "github.com/docker/docker/api/types"
import (
"encoding/json"
"fmt"
"sort"
)
// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
versionIndex := len(p)
prefixIndex := 0
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
return fmt.Errorf("%q is not a plugin interface type", p)
}
p = p[1 : len(p)-1]
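// Single scan: prefixIndex tracks the last '.' seen and versionIndex the
// first '/', splitting "prefix.capability/version" (the version is optional).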
loop:
for i, b := range p {
switch b {
case '.':
prefixIndex = i
case '/':
versionIndex = i
break loop
}
}
t.Prefix = string(p[:prefixIndex])
t.Capability = string(p[prefixIndex+1 : versionIndex])
if versionIndex < len(p) {
t.Version = string(p[versionIndex+1:])
}
return nil
}
// MarshalJSON implements json.Marshaler for PluginInterfaceType
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
// String implements fmt.Stringer for PluginInterfaceType
func (t PluginInterfaceType) String() string {
return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
}
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
Name string
Description string
Value []string
}
// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege
func (s PluginPrivileges) Len() int {
return len(s)
}
func (s PluginPrivileges) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
func (s PluginPrivileges) Swap(i, j int) {
sort.Strings(s[i].Value)
sort.Strings(s[j].Value)
s[i], s[j] = s[j], s[i]
}
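As a sketch of the wire format the marshaling methods above handle, a hypothetical round trip (the plugin reference value is illustrative):
```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	var t types.PluginInterfaceType
	// Wire form is "prefix.capability/version".
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0

	out, _ := json.Marshal(&t) // MarshalJSON has a pointer receiver
	fmt.Println(string(out))   // "docker.volumedriver/1.0"
}
```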

vendor/github.com/docker/docker/api/types/port.go generated vendored Normal file

@@ -0,0 +1,23 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Port An open port on a container
// swagger:model Port
type Port struct {
// Host IP address that the container's port is mapped to
IP string `json:"IP,omitempty"`
// Port on the container
// Required: true
PrivatePort uint16 `json:"PrivatePort"`
// Port exposed on the host
PublicPort uint16 `json:"PublicPort,omitempty"`
// type
// Required: true
Type string `json:"Type"`
}

vendor/github.com/docker/docker/api/types/seccomp.go generated vendored Normal file

@@ -0,0 +1,94 @@
package types // import "github.com/docker/docker/api/types"
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
DefaultAction Action `json:"defaultAction"`
// Architectures is kept to maintain backward compatibility with the old
// seccomp profile.
Architectures []Arch `json:"architectures,omitempty"`
ArchMap []Architecture `json:"archMap,omitempty"`
Syscalls []*Syscall `json:"syscalls"`
}
// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
Arch Arch `json:"architecture"`
SubArches []Arch `json:"subArchitectures"`
}
// Arch used for architectures
type Arch string
// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
ArchX86 Arch = "SCMP_ARCH_X86"
ArchX86_64 Arch = "SCMP_ARCH_X86_64"
ArchX32 Arch = "SCMP_ARCH_X32"
ArchARM Arch = "SCMP_ARCH_ARM"
ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
ArchMIPS Arch = "SCMP_ARCH_MIPS"
ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
ArchPPC Arch = "SCMP_ARCH_PPC"
ArchPPC64 Arch = "SCMP_ARCH_PPC64"
ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
ArchS390 Arch = "SCMP_ARCH_S390"
ArchS390X Arch = "SCMP_ARCH_S390X"
)
// Action taken upon Seccomp rule match
type Action string
// Define actions for Seccomp rules
const (
ActKill Action = "SCMP_ACT_KILL"
ActTrap Action = "SCMP_ACT_TRAP"
ActErrno Action = "SCMP_ACT_ERRNO"
ActTrace Action = "SCMP_ACT_TRACE"
ActAllow Action = "SCMP_ACT_ALLOW"
)
// Operator used to match syscall arguments in Seccomp
type Operator string
// Define operators for syscall arguments in Seccomp
const (
OpNotEqual Operator = "SCMP_CMP_NE"
OpLessThan Operator = "SCMP_CMP_LT"
OpLessEqual Operator = "SCMP_CMP_LE"
OpEqualTo Operator = "SCMP_CMP_EQ"
OpGreaterEqual Operator = "SCMP_CMP_GE"
OpGreaterThan Operator = "SCMP_CMP_GT"
OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
)
// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
Index uint `json:"index"`
Value uint64 `json:"value"`
ValueTwo uint64 `json:"valueTwo"`
Op Operator `json:"op"`
}
// Filter is used to conditionally apply Seccomp rules
type Filter struct {
Caps []string `json:"caps,omitempty"`
Arches []string `json:"arches,omitempty"`
MinKernel string `json:"minKernel,omitempty"`
}
// Syscall is used to match a group of syscalls in Seccomp
type Syscall struct {
Name string `json:"name,omitempty"`
Names []string `json:"names,omitempty"`
Action Action `json:"action"`
Args []*Arg `json:"args"`
Comment string `json:"comment"`
Includes Filter `json:"includes"`
Excludes Filter `json:"excludes"`
}
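As a sketch of how these types compose (the profile contents are illustrative, not a recommended policy): a deny-by-default profile marshaled to the JSON shape the struct tags above describe.
```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Deny everything by default; allow a small, illustrative set of syscalls.
	profile := types.Seccomp{
		DefaultAction: types.ActErrno,
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX32},
		Syscalls: []*types.Syscall{{
			Names:  []string{"read", "write", "exit_group"},
			Action: types.ActAllow,
		}},
	}
	out, err := json.MarshalIndent(profile, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```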


@@ -0,0 +1,12 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ServiceUpdateResponse service update response
// swagger:model ServiceUpdateResponse
type ServiceUpdateResponse struct {
// Optional warning messages
Warnings []string `json:"Warnings"`
}

vendor/github.com/docker/docker/api/types/stats.go generated vendored Normal file

@@ -0,0 +1,181 @@
// Package types is used for API stability in the types and responses returned
// to the consumers of the API stats endpoint.
package types // import "github.com/docker/docker/api/types"
import "time"
// ThrottlingData stores CPU throttling stats of one running container.
// Not used on Windows.
type ThrottlingData struct {
// Number of periods with throttling active
Periods uint64 `json:"periods"`
// Number of periods when the container hits its throttling limit.
ThrottledPeriods uint64 `json:"throttled_periods"`
// Aggregate time the container was throttled for in nanoseconds.
ThrottledTime uint64 `json:"throttled_time"`
}
// CPUUsage stores all CPU stats aggregated since container inception.
type CPUUsage struct {
// Total CPU time consumed.
// Units: nanoseconds (Linux)
// Units: 100's of nanoseconds (Windows)
TotalUsage uint64 `json:"total_usage"`
// Total CPU time consumed per core (Linux). Not used on Windows.
// Units: nanoseconds.
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
// Time spent by tasks of the cgroup in kernel mode (Linux).
// Time spent by all container processes in kernel mode (Windows).
// Units: nanoseconds (Linux).
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
// Time spent by tasks of the cgroup in user mode (Linux).
// Time spent by all container processes in user mode (Windows).
// Units: nanoseconds (Linux).
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
UsageInUsermode uint64 `json:"usage_in_usermode"`
}
// CPUStats aggregates and wraps all CPU related info of container
type CPUStats struct {
// CPU Usage. Linux and Windows.
CPUUsage CPUUsage `json:"cpu_usage"`
// System Usage. Linux only.
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
// Online CPUs. Linux only.
OnlineCPUs uint32 `json:"online_cpus,omitempty"`
// Throttling Data. Linux only.
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}
// MemoryStats aggregates all memory stats since container inception on Linux.
// Windows returns stats for commit and private working set only.
type MemoryStats struct {
// Linux Memory Stats
// current res_counter usage for memory
Usage uint64 `json:"usage,omitempty"`
// maximum usage ever recorded.
MaxUsage uint64 `json:"max_usage,omitempty"`
// TODO(vishh): Export these as stronger types.
// all the stats exported via memory.stat.
Stats map[string]uint64 `json:"stats,omitempty"`
// number of times memory usage hits limits.
Failcnt uint64 `json:"failcnt,omitempty"`
Limit uint64 `json:"limit,omitempty"`
// Windows Memory Stats
// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
// committed bytes
Commit uint64 `json:"commitbytes,omitempty"`
// peak committed bytes
CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
// private working set
PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
}
// BlkioStatEntry is one small entity to store a piece of Blkio stats
// Not used on Windows.
type BlkioStatEntry struct {
Major uint64 `json:"major"`
Minor uint64 `json:"minor"`
Op string `json:"op"`
Value uint64 `json:"value"`
}
// BlkioStats stores all IO service stats for data read and write.
// This is a Linux specific structure as the differences between expressing
// block I/O on Windows and Linux are sufficiently significant to make
// little sense attempting to morph into a combined structure.
type BlkioStats struct {
// number of bytes transferred to and from the block device
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
}
// StorageStats is the disk I/O stats for read/write on Windows.
type StorageStats struct {
ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
}
// NetworkStats aggregates the network stats of one container
type NetworkStats struct {
// Bytes received. Windows and Linux.
RxBytes uint64 `json:"rx_bytes"`
// Packets received. Windows and Linux.
RxPackets uint64 `json:"rx_packets"`
// Received errors. Not used on Windows. Note that we don't `omitempty` this
// field as it is expected in the >=v1.21 API stats structure.
RxErrors uint64 `json:"rx_errors"`
// Incoming packets dropped. Windows and Linux.
RxDropped uint64 `json:"rx_dropped"`
// Bytes sent. Windows and Linux.
TxBytes uint64 `json:"tx_bytes"`
// Packets sent. Windows and Linux.
TxPackets uint64 `json:"tx_packets"`
// Sent errors. Not used on Windows. Note that we don't `omitempty` this
// field as it is expected in the >=v1.21 API stats structure.
TxErrors uint64 `json:"tx_errors"`
// Outgoing packets dropped. Windows and Linux.
TxDropped uint64 `json:"tx_dropped"`
// Endpoint ID. Not used on Linux.
EndpointID string `json:"endpoint_id,omitempty"`
// Instance ID. Not used on Linux.
InstanceID string `json:"instance_id,omitempty"`
}
// PidsStats contains the stats of a container's pids
type PidsStats struct {
// Current is the number of pids in the cgroup
Current uint64 `json:"current,omitempty"`
// Limit is the hard limit on the number of pids in the cgroup.
// A "Limit" of 0 means that there is no limit.
Limit uint64 `json:"limit,omitempty"`
}
// Stats aggregates all types of stats of one container.
type Stats struct {
// Common stats
Read time.Time `json:"read"`
PreRead time.Time `json:"preread"`
// Linux specific stats, not populated on Windows.
PidsStats PidsStats `json:"pids_stats,omitempty"`
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
// Windows specific stats, not populated on Linux.
NumProcs uint32 `json:"num_procs"`
StorageStats StorageStats `json:"storage_stats,omitempty"`
// Shared stats
CPUStats CPUStats `json:"cpu_stats,omitempty"`
PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
}
// StatsJSON extends Stats with the container's name and ID, plus per-network stats.
type StatsJSON struct {
Stats
Name string `json:"name,omitempty"`
ID string `json:"id,omitempty"`
// Networks request version >=1.21
Networks map[string]NetworkStats `json:"networks,omitempty"`
}
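These counters are raw totals; consumers derive rates from the current and previous (`precpu_stats`) samples. A sketch of the conventional Linux CPU-percentage calculation follows; the package and helper names are hypothetical:
```
package stats

import "github.com/docker/docker/api/types"

// CPUPercentLinux derives a CPU percentage from one stats sample: the
// container's CPU-time delta over the host's CPU-time delta, scaled by the
// number of online CPUs.
func CPUPercentLinux(s types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	online := float64(s.CPUStats.OnlineCPUs)
	if online == 0 {
		// Older daemons leave OnlineCPUs unset; fall back to the per-CPU slice.
		online = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	if sysDelta > 0 && cpuDelta > 0 {
		return cpuDelta / sysDelta * online * 100.0
	}
	return 0.0
}
```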

vendor/github.com/docker/docker/api/types/types.go generated vendored Normal file

@@ -0,0 +1,617 @@
package types // import "github.com/docker/docker/api/types"
import (
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-connections/nat"
)
// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
Type string
Layers []string `json:",omitempty"`
BaseLayer string `json:",omitempty"`
}
// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
ID string `json:"Id"`
RepoTags []string
RepoDigests []string
Parent string
Comment string
Created string
Container string
ContainerConfig *container.Config
DockerVersion string
Author string
Config *container.Config
Architecture string
Variant string `json:",omitempty"`
Os string
OsVersion string `json:",omitempty"`
Size int64
VirtualSize int64
GraphDriver GraphDriverData
RootFS RootFS
Metadata ImageMetadata
}
// ImageMetadata contains engine-local data about the image
type ImageMetadata struct {
LastTagTime time.Time `json:",omitempty"`
}
// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
ID string `json:"Id"`
Names []string
Image string
ImageID string
Command string
Created int64
Ports []Port
SizeRw int64 `json:",omitempty"`
SizeRootFs int64 `json:",omitempty"`
Labels map[string]string
State string
Status string
HostConfig struct {
NetworkMode string `json:",omitempty"`
}
NetworkSettings *SummaryNetworkSettings
Mounts []MountPoint
}
// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
Resource string
}
// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type ContainerPathStat struct {
Name string `json:"name"`
Size int64 `json:"size"`
Mode os.FileMode `json:"mode"`
Mtime time.Time `json:"mtime"`
LinkTarget string `json:"linkTarget"`
}
// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
Body io.ReadCloser `json:"body"`
OSType string `json:"ostype"`
}
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
APIVersion string
OSType string
Experimental bool
BuilderVersion BuilderVersion
}
// ComponentVersion describes the version information for a specific component.
type ComponentVersion struct {
Name string
Version string
Details map[string]string `json:",omitempty"`
}
// Version contains response of Engine API:
// GET "/version"
type Version struct {
Platform struct{ Name string } `json:",omitempty"`
Components []ComponentVersion `json:",omitempty"`
// The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility
Version string
APIVersion string `json:"ApiVersion"`
MinAPIVersion string `json:"MinAPIVersion,omitempty"`
GitCommit string
GoVersion string
Os string
Arch string
KernelVersion string `json:",omitempty"`
Experimental bool `json:",omitempty"`
BuildTime string `json:",omitempty"`
}
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd, or runC.
type Commit struct {
ID string // ID is the actual commit ID of external tool.
Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
}
// Info contains response of Engine API:
// GET "/info"
type Info struct {
ID string
Containers int
ContainersRunning int
ContainersPaused int
ContainersStopped int
Images int
Driver string
DriverStatus [][2]string
SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
KernelMemory bool
KernelMemoryTCP bool
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
CPUCfsQuota bool `json:"CpuCfsQuota"`
CPUShares bool
CPUSet bool
PidsLimit bool
IPv4Forwarding bool
BridgeNfIptables bool
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
Debug bool
NFd int
OomKillDisable bool
NGoroutines int
SystemTime string
LoggingDriver string
CgroupDriver string
NEventsListener int
KernelVersion string
OperatingSystem string
OSVersion string
OSType string
Architecture string
IndexServerAddress string
RegistryConfig *registry.ServiceConfig
NCPU int
MemTotal int64
GenericResources []swarm.GenericResource
DockerRootDir string
HTTPProxy string `json:"HttpProxy"`
HTTPSProxy string `json:"HttpsProxy"`
NoProxy string
Name string
Labels []string
ExperimentalBuild bool
ServerVersion string
ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
Runtimes map[string]Runtime
DefaultRuntime string
Swarm swarm.Info
// LiveRestoreEnabled determines whether containers should be kept
// running when the daemon is shutdown or upon daemon start if
// running containers are detected
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
ProductLicense string `json:",omitempty"`
Warnings []string
}
// KeyValue holds a key/value pair
type KeyValue struct {
Key, Value string
}
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
Options []KeyValue
}
// DecodeSecurityOptions decodes a security options string slice to a type-safe
// SecurityOpt slice.
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
so := []SecurityOpt{}
for _, opt := range opts {
// support output from a < 1.13 docker daemon
if !strings.Contains(opt, "=") {
so = append(so, SecurityOpt{Name: opt})
continue
}
secopt := SecurityOpt{}
split := strings.Split(opt, ",")
for _, s := range split {
kv := strings.SplitN(s, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("invalid security option %q", s)
}
if kv[0] == "" || kv[1] == "" {
return nil, errors.New("invalid empty security option")
}
if kv[0] == "name" {
secopt.Name = kv[1]
continue
}
secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
}
so = append(so, secopt)
}
return so, nil
}
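A usage sketch; the input strings mimic what a daemon reports in Info.SecurityOptions:
```
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Example values; a real daemon reports these in Info.SecurityOptions.
	raw := []string{"name=seccomp,profile=default", "name=apparmor"}
	opts, err := types.DecodeSecurityOptions(raw)
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Println(o.Name, o.Options) // seccomp [{profile default}], then apparmor []
	}
}
```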
// PluginsInfo is a temp struct holding the plugin names
// registered with the docker daemon. It is used by the Info struct.
type PluginsInfo struct {
// List of Volume plugins registered
Volume []string
// List of Network plugins registered
Network []string
// List of Authorization plugins registered
Authorization []string
// List of Log plugins registered
Log []string
}
// ExecStartCheck is a temp struct used by execStart.
// Config fields are part of ExecConfig in the runconfig package.
type ExecStartCheck struct {
// ExecStart will first check if it's detached
Detach bool
// Check if there's a tty
Tty bool
}
// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
Start time.Time // Start is the time this check started
End time.Time // End is the time this check ended
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
Output string // Output from last check
}
// Health states
const (
NoHealthcheck = "none" // Indicates there is no healthcheck
Starting = "starting" // Starting indicates that the container is not yet ready
Healthy = "healthy" // Healthy indicates that the container is running correctly
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
)
// Health stores information about the container's healthcheck results
type Health struct {
Status string // Status is one of Starting, Healthy or Unhealthy
FailingStreak int // FailingStreak is the number of consecutive failures
Log []*HealthcheckResult // Log contains the last few results (oldest first)
}
// ContainerState stores a container's running state.
// It's part of ContainerJSONBase and is returned by the "inspect" command.
type ContainerState struct {
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
Running bool
Paused bool
Restarting bool
OOMKilled bool
Dead bool
Pid int
ExitCode int
Error string
StartedAt string
FinishedAt string
Health *Health `json:",omitempty"`
}
// ContainerNode stores information about the node that a container
// is running on. It's only used by the Docker Swarm standalone API
type ContainerNode struct {
ID string
IPAddress string `json:"IP"`
Addr string
Name string
Cpus int
Memory int64
Labels map[string]string
}
// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
ID string `json:"Id"`
Created string
Path string
Args []string
State *ContainerState
Image string
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
Name string
RestartCount int
Driver string
Platform string
MountLabel string
ProcessLabel string
AppArmorProfile string
ExecIDs []string
HostConfig *container.HostConfig
GraphDriver GraphDriverData
SizeRw *int64 `json:",omitempty"`
SizeRootFs *int64 `json:",omitempty"`
}
// ContainerJSON extends ContainerJSONBase with the container's mount points, config and network settings.
type ContainerJSON struct {
*ContainerJSONBase
Mounts []MountPoint
Config *container.Config
NetworkSettings *NetworkSettings
}
// NetworkSettings exposes the network settings in the api
type NetworkSettings struct {
NetworkSettingsBase
DefaultNetworkSettings
Networks map[string]*network.EndpointSettings
}
// SummaryNetworkSettings provides a summary of a container's networks
// in /containers/json
type SummaryNetworkSettings struct {
Networks map[string]*network.EndpointSettings
}
// NetworkSettingsBase holds basic information about networks
type NetworkSettingsBase struct {
Bridge string // Bridge is the bridge name the network uses (e.g. `docker0`)
SandboxID string // SandboxID uniquely represents a container's network stack
HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
SandboxKey string // SandboxKey identifies the sandbox
SecondaryIPAddresses []network.Address
SecondaryIPv6Addresses []network.Address
}
// DefaultNetworkSettings holds network information
// during the 2 release deprecation period.
// It will be removed in Docker 1.11.
type DefaultNetworkSettings struct {
EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
Gateway string // Gateway holds the gateway address for the network
GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
IPAddress string // IPAddress holds the IPv4 address for the network
IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
MacAddress string // MacAddress holds the MAC address for the network
}
// MountPoint represents a mount point configuration inside the container.
// This is used for reporting the mountpoints in use by a container.
type MountPoint struct {
Type mount.Type `json:",omitempty"`
Name string `json:",omitempty"`
Source string
Destination string
Driver string `json:",omitempty"`
Mode string
RW bool
Propagation mount.Propagation
}
// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
Name string // Name is the requested name of the network
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
Created time.Time // Created is the time the network was created
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
IPAM network.IPAM // IPAM is the network's IP Address Management
Internal bool // Internal represents if the network is used internally only
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
ConfigOnly bool // ConfigOnly networks are placeholder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
Options map[string]string // Options holds the network specific options to use for when creating the network
Labels map[string]string // Labels holds metadata specific to the network being created
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
Services map[string]network.ServiceInfo `json:",omitempty"`
}
// EndpointResource contains network resources allocated and used for a container in a network
type EndpointResource struct {
Name string
EndpointID string
MacAddress string
IPv4Address string
IPv6Address string
}
// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
// Check for networks with duplicate names.
// Network is primarily keyed based on a random ID and not on the name.
// Network name is strictly a user-friendly alias to the network
// which is uniquely identified using ID.
// And there is no guaranteed way to check for duplicates.
// Option CheckDuplicate is there to provide best-effort checking of any networks
// that have the same name, but it is not guaranteed to catch all name collisions.
CheckDuplicate bool
Driver string
Scope string
EnableIPv6 bool
IPAM *network.IPAM
Internal bool
Attachable bool
Ingress bool
ConfigOnly bool
ConfigFrom *network.ConfigReference
Options map[string]string
Labels map[string]string
}
// NetworkCreateRequest is the request message sent to the server for network create call.
type NetworkCreateRequest struct {
NetworkCreate
Name string
}
// NetworkCreateResponse is the response message sent by the server for network create call
type NetworkCreateResponse struct {
ID string `json:"Id"`
Warning string
}
// NetworkConnect represents the data to be used to connect a container to the network
type NetworkConnect struct {
Container string
EndpointConfig *network.EndpointSettings `json:",omitempty"`
}
// NetworkDisconnect represents the data to be used to disconnect a container from the network
type NetworkDisconnect struct {
Container string
Force bool
}
// NetworkInspectOptions holds parameters to inspect network
type NetworkInspectOptions struct {
Scope string
Verbose bool
}
// Checkpoint represents the details of a checkpoint
type Checkpoint struct {
Name string // Name is the name of the checkpoint
}
// Runtime describes an OCI runtime
type Runtime struct {
Path string `json:"path"`
Args []string `json:"runtimeArgs,omitempty"`
}
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
LayersSize int64
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
BuildCache []*BuildCache
BuilderSize int64 // deprecated
}
// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
ContainersDeleted []string
SpaceReclaimed uint64
}
// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
VolumesDeleted []string
SpaceReclaimed uint64
}
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
ImagesDeleted []ImageDeleteResponseItem
SpaceReclaimed uint64
}
// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
CachesDeleted []string
SpaceReclaimed uint64
}
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
NetworksDeleted []string
}
// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
// ID is the id of the created secret.
ID string
}
// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
Filters filters.Args
}
// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
// ID is the id of the created config.
ID string
}
// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
Filters filters.Args
}
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
Tag string
Digest string
Size int
}
// BuildResult contains the image id of a successful build
type BuildResult struct {
ID string
}
// BuildCache contains information about a build cache record
type BuildCache struct {
ID string
Parent string
Type string
Description string
InUse bool
Shared bool
Size int64
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
}
// BuildCachePruneOptions hold parameters to prune the build cache
type BuildCachePruneOptions struct {
All bool
KeepStorage int64
Filters filters.Args
}

vendor/github.com/docker/docker/api/types/volume.go generated vendored Normal file

@@ -0,0 +1,69 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Volume volume
// swagger:model Volume
type Volume struct {
// Date/Time the volume was created.
CreatedAt string `json:"CreatedAt,omitempty"`
// Name of the volume driver used by the volume.
// Required: true
Driver string `json:"Driver"`
// User-defined key/value metadata.
// Required: true
Labels map[string]string `json:"Labels"`
// Mount path of the volume on the host.
// Required: true
Mountpoint string `json:"Mountpoint"`
// Name of the volume.
// Required: true
Name string `json:"Name"`
// The driver specific options used when creating the volume.
// Required: true
Options map[string]string `json:"Options"`
// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
// Required: true
Scope string `json:"Scope"`
// Low-level details about the volume, provided by the volume driver.
// Details are returned as a map with key/value pairs:
// `{"key":"value","key2":"value2"}`.
//
// The `Status` field is optional, and is omitted if the volume driver
// does not support this feature.
//
Status map[string]interface{} `json:"Status,omitempty"`
// usage data
UsageData *VolumeUsageData `json:"UsageData,omitempty"`
}
// VolumeUsageData Usage details about the volume. This information is used by the
// `GET /system/df` endpoint, and omitted in other endpoints.
//
// swagger:model VolumeUsageData
type VolumeUsageData struct {
// The number of containers referencing this volume. This field
// is set to `-1` if the reference-count is not available.
//
// Required: true
RefCount int64 `json:"RefCount"`
// Amount of disk space used by the volume (in bytes). This information
// is only available for volumes created with the `"local"` volume
// driver. For volumes created with other volume drivers, this field
// is set to `-1` ("not available")
//
// Required: true
Size int64 `json:"Size"`
}


@@ -0,0 +1 @@
This package provides helper functions for dealing with string identifiers


@@ -0,0 +1,63 @@
// Package stringid provides helper functions for dealing with string identifiers
package stringid // import "github.com/docker/docker/pkg/stringid"
import (
"crypto/rand"
"encoding/hex"
"fmt"
"regexp"
"strconv"
"strings"
)
const shortLen = 12
var (
validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
)
// IsShortID determines if an arbitrary string *looks like* a short ID.
func IsShortID(id string) bool {
return validShortID.MatchString(id)
}
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
if i := strings.IndexRune(id, ':'); i >= 0 {
id = id[i+1:]
}
if len(id) > shortLen {
id = id[:shortLen]
}
return id
}
// GenerateRandomID returns a unique id.
func GenerateRandomID() string {
b := make([]byte, 32)
for {
if _, err := rand.Read(b); err != nil {
panic(err) // This shouldn't happen
}
id := hex.EncodeToString(b)
// if we try to parse the truncated form as an int and we don't have
// an error then the value is all numeric and causes issues when
// used as a hostname. ref #3869
if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
continue
}
return id
}
}
// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}
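A usage sketch of the helpers above (the generated values vary per run; the prefixed ID is illustrative):
```
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	id := stringid.GenerateRandomID()
	short := stringid.TruncateID(id)

	fmt.Println(len(id), len(short))       // 64 12
	fmt.Println(stringid.IsShortID(short)) // true
	fmt.Println(stringid.ValidateID(id))   // <nil>

	// Algorithm prefixes are stripped before truncation.
	fmt.Println(stringid.TruncateID("sha256:4a3c32c282622a625a54db9bdeadbeef")) // 4a3c32c28262
}
```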


@@ -0,0 +1,21 @@
package tarsum // import "github.com/docker/docker/pkg/tarsum"
// BuilderContext is an interface extending TarSum by adding the Remove method.
// In general there was concern about adding this method to TarSum itself
// so instead it is being added just to "BuilderContext" which will then
// only be used during the .dockerignore file processing
// - see builder/evaluator.go
type BuilderContext interface {
TarSum
Remove(string)
}
func (bc *tarSum) Remove(filename string) {
for i, fis := range bc.sums {
if fis.Name() == filename {
bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
// Note, we don't just return because there could be
// more than one with this name
}
}
}


@@ -0,0 +1,133 @@
package tarsum // import "github.com/docker/docker/pkg/tarsum"
import (
"runtime"
"sort"
"strings"
)
// FileInfoSumInterface provides an interface for accessing file checksum
// information within a tar file. This info is accessed through an interface
// so the actual name and sum cannot be meddled with.
type FileInfoSumInterface interface {
// File name
Name() string
// Checksum of this particular file and its headers
Sum() string
// Position of file in the tar
Pos() int64
}
type fileInfoSum struct {
name string
sum string
pos int64
}
func (fis fileInfoSum) Name() string {
return fis.name
}
func (fis fileInfoSum) Sum() string {
return fis.sum
}
func (fis fileInfoSum) Pos() int64 {
return fis.pos
}
// FileInfoSums provides a list of FileInfoSumInterfaces.
type FileInfoSums []FileInfoSumInterface
// GetFile returns the first FileInfoSumInterface with a matching name.
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
// We do case insensitive matching on Windows as c:\APP and c:\app are
// the same. See issue #33107.
for i := range fis {
if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) ||
(runtime.GOOS != "windows" && fis[i].Name() == name) {
return fis[i]
}
}
return nil
}
// GetAllFile returns a FileInfoSums with all matching names.
func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
f := FileInfoSums{}
for i := range fis {
if fis[i].Name() == name {
f = append(f, fis[i])
}
}
return f
}
// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
for i := range fis {
f := fis[i]
if _, ok := seen[f.Name()]; ok {
dups = append(dups, f)
} else {
seen[f.Name()] = 0
}
}
return dups
}
// Len returns the size of the FileInfoSums.
func (fis FileInfoSums) Len() int { return len(fis) }
// Swap swaps two FileInfoSum values in a FileInfoSums list.
func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
// SortByPos sorts FileInfoSums content by position.
func (fis FileInfoSums) SortByPos() {
sort.Sort(byPos{fis})
}
// SortByNames sorts FileInfoSums content by name.
func (fis FileInfoSums) SortByNames() {
sort.Sort(byName{fis})
}
// SortBySums sorts FileInfoSums content by sums.
func (fis FileInfoSums) SortBySums() {
dups := fis.GetDuplicatePaths()
if len(dups) > 0 {
sort.Sort(bySum{fis, dups})
} else {
sort.Sort(bySum{fis, nil})
}
}
// byName is a sort.Sort helper for sorting by file names.
// If names are the same, order them by their appearance in the tar archive
type byName struct{ FileInfoSums }
func (bn byName) Less(i, j int) bool {
if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
}
return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
}
// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
type bySum struct {
FileInfoSums
dups FileInfoSums
}
func (bs bySum) Less(i, j int) bool {
if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
}
return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
}
// byPos is a sort.Sort helper for sorting the fileinfos by their original position in the tar archive.
type byPos struct{ FileInfoSums }
func (bp byPos) Less(i, j int) bool {
return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
}
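A sketch of the sorting behavior using a hypothetical in-memory FileInfoSumInterface implementation (the package's own fileInfoSum type is unexported):
```
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/tarsum"
)

// fis is a stand-in implementation for demonstration only.
type fis struct {
	name, sum string
	pos       int64
}

func (f fis) Name() string { return f.name }
func (f fis) Sum() string  { return f.sum }
func (f fis) Pos() int64   { return f.pos }

func main() {
	sums := tarsum.FileInfoSums{
		fis{name: "etc/hosts", sum: "beef", pos: 0},
		fis{name: "bin/sh", sum: "0abc", pos: 1},
	}
	sums.SortBySums() // hexadecimal-digest order, as the final checksum requires
	for _, s := range sums {
		fmt.Println(s.Name(), s.Sum()) // bin/sh 0abc, then etc/hosts beef
	}
}
```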

vendor/github.com/docker/docker/pkg/tarsum/tarsum.go generated vendored Normal file

@@ -0,0 +1,301 @@
// Package tarsum provides algorithms to perform checksum calculation on
// filesystem layers.
//
// The transportation of filesystems, regarding Docker, is done with tar(1)
// archives. There are a variety of tar serialization formats [2], and a key
// concern here is ensuring a repeatable checksum given a set of inputs from a
// generic tar archive. Types of transportation include distribution to and from a
// registry endpoint, saving and loading through commands or Docker daemon APIs,
// transferring the build context from client to Docker daemon, and committing the
// filesystem of a container to become an image.
//
// As tar archives are used for transit, but not preserved in many situations, the
// focus of the algorithm is to ensure the integrity of the preserved filesystem,
// while maintaining a deterministic accountability. This means neither
// constraining the ordering or manipulation of the files during the creation or
// unpacking of the archive, nor including additional metadata state about the
// filesystem attributes.
package tarsum // import "github.com/docker/docker/pkg/tarsum"
import (
"archive/tar"
"bytes"
"compress/gzip"
"crypto"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"path"
"strings"
)
const (
buf8K = 8 * 1024
buf16K = 16 * 1024
buf32K = 32 * 1024
)
// NewTarSum creates a new interface for calculating a fixed time checksum of a
// tar archive.
//
// This is used for calculating checksums of layers of an image, in some cases
// including the byte payload of the image's json metadata as well, and for
// calculating the checksums for buildcache.
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
return NewTarSumHash(r, dc, v, DefaultTHash)
}
// NewTarSumHash creates a new TarSum, providing a THash to use rather than
// the DefaultTHash.
func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
headerSelector, err := getTarHeaderSelector(v)
if err != nil {
return nil, err
}
ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
err = ts.initTarSum()
return ts, err
}
// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
parts := strings.SplitN(label, "+", 2)
if len(parts) != 2 {
return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
}
versionName, hashName := parts[0], parts[1]
version, ok := tarSumVersionsByName[versionName]
if !ok {
return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
}
hashConfig, ok := standardHashConfigs[hashName]
if !ok {
return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
}
tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
return NewTarSumHash(r, disableCompression, version, tHash)
}
// TarSum is the generic interface for calculating fixed time
// checksums of a tar archive.
type TarSum interface {
io.Reader
GetSums() FileInfoSums
Sum([]byte) string
Version() Version
Hash() THash
}
// tarSum struct is the structure for a Version0 checksum calculation.
type tarSum struct {
io.Reader
tarR *tar.Reader
tarW *tar.Writer
writer writeCloseFlusher
bufTar *bytes.Buffer
bufWriter *bytes.Buffer
bufData []byte
h hash.Hash
tHash THash
sums FileInfoSums
fileCounter int64
currentFile string
finished bool
first bool
DisableCompression bool // false by default. When false, the output is gzip compressed.
tarSumVersion Version // this field is not exported so it can not be mutated during use
headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive
}
func (ts tarSum) Hash() THash {
return ts.tHash
}
func (ts tarSum) Version() Version {
return ts.tarSumVersion
}
// THash provides a hash.Hash type generator and its name.
type THash interface {
Hash() hash.Hash
Name() string
}
// NewTHash is a convenience method for creating a THash.
func NewTHash(name string, h func() hash.Hash) THash {
return simpleTHash{n: name, h: h}
}
type tHashConfig struct {
name string
hash crypto.Hash
}
var (
// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
standardHashConfigs = map[string]tHashConfig{
"sha256": {name: "sha256", hash: crypto.SHA256},
"sha512": {name: "sha512", hash: crypto.SHA512},
}
)
// DefaultTHash is the default TarSum hashing algorithm - "sha256".
var DefaultTHash = NewTHash("sha256", sha256.New)
type simpleTHash struct {
n string
h func() hash.Hash
}
func (sth simpleTHash) Name() string { return sth.n }
func (sth simpleTHash) Hash() hash.Hash { return sth.h() }
func (ts *tarSum) encodeHeader(h *tar.Header) error {
for _, elem := range ts.headerSelector.selectHeaders(h) {
// Ignore these headers to be compatible with versions
// before go 1.10
if elem[0] == "gname" || elem[0] == "uname" {
elem[1] = ""
}
if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
return err
}
}
return nil
}
func (ts *tarSum) initTarSum() error {
ts.bufTar = bytes.NewBuffer([]byte{})
ts.bufWriter = bytes.NewBuffer([]byte{})
ts.tarR = tar.NewReader(ts.Reader)
ts.tarW = tar.NewWriter(ts.bufTar)
if !ts.DisableCompression {
ts.writer = gzip.NewWriter(ts.bufWriter)
} else {
ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
}
if ts.tHash == nil {
ts.tHash = DefaultTHash
}
ts.h = ts.tHash.Hash()
ts.h.Reset()
ts.first = true
ts.sums = FileInfoSums{}
return nil
}
func (ts *tarSum) Read(buf []byte) (int, error) {
if ts.finished {
return ts.bufWriter.Read(buf)
}
if len(ts.bufData) < len(buf) {
switch {
case len(buf) <= buf8K:
ts.bufData = make([]byte, buf8K)
case len(buf) <= buf16K:
ts.bufData = make([]byte, buf16K)
case len(buf) <= buf32K:
ts.bufData = make([]byte, buf32K)
default:
ts.bufData = make([]byte, len(buf))
}
}
buf2 := ts.bufData[:len(buf)]
n, err := ts.tarR.Read(buf2)
if err != nil {
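// io.EOF from the tar reader marks the end of the current entry's payload:
// flush the remaining bytes into the hash, record this file's sum, then
// advance to the next header (a second EOF means the archive is finished).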
if err == io.EOF {
if _, err := ts.h.Write(buf2[:n]); err != nil {
return 0, err
}
if !ts.first {
ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
ts.fileCounter++
ts.h.Reset()
} else {
ts.first = false
}
if _, err := ts.tarW.Write(buf2[:n]); err != nil {
return 0, err
}
currentHeader, err := ts.tarR.Next()
if err != nil {
if err == io.EOF {
if err := ts.tarW.Close(); err != nil {
return 0, err
}
if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
return 0, err
}
if err := ts.writer.Close(); err != nil {
return 0, err
}
ts.finished = true
return ts.bufWriter.Read(buf)
}
return 0, err
}
ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name))
if err := ts.encodeHeader(currentHeader); err != nil {
return 0, err
}
if err := ts.tarW.WriteHeader(currentHeader); err != nil {
return 0, err
}
if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
return 0, err
}
ts.writer.Flush()
return ts.bufWriter.Read(buf)
}
return 0, err
}
// Filling the hash buffer
if _, err = ts.h.Write(buf2[:n]); err != nil {
return 0, err
}
// Filling the tar writer
if _, err = ts.tarW.Write(buf2[:n]); err != nil {
return 0, err
}
// Filling the output writer
if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
return 0, err
}
ts.writer.Flush()
return ts.bufWriter.Read(buf)
}
func (ts *tarSum) Sum(extra []byte) string {
ts.sums.SortBySums()
h := ts.tHash.Hash()
if extra != nil {
h.Write(extra)
}
for _, fis := range ts.sums {
h.Write([]byte(fis.Sum()))
}
checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
return checksum
}
func (ts *tarSum) GetSums() FileInfoSums {
return ts.sums
}
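End to end, a minimal sketch (assuming the exported Version1 constant from this package's versioning file): build a one-file tar in memory, drain the TarSum reader so the checksum accumulates, then ask for the sum.
```
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	// A one-file tar archive built in memory.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello world\n")
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	tw.Write(body)
	tw.Close()

	ts, err := tarsum.NewTarSum(&buf, true, tarsum.Version1) // true: disable gzip of the output
	if err != nil {
		panic(err)
	}
	// The sum is only complete once the stream has been fully read.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Sum(nil)) // tarsum.v1+sha256:<hex digest>
}
```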


@@ -0,0 +1,230 @@
page_title: TarSum checksum specification
page_description: Documentation for algorithms used in the TarSum checksum calculation
page_keywords: docker, checksum, validation, tarsum
# TarSum Checksum Specification
## Abstract
This document describes the algorithms used in performing the TarSum checksum
calculation on filesystem layers, the need for this method over existing
methods, and the versioning of this calculation.
## Warning
This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.
This is _not_ a cryptographic attestation, and should not be considered secure.
## Introduction
The transportation of filesystems, regarding Docker, is done with tar(1)
archives. There are a variety of tar serialization formats [2], and a key
concern here is ensuring a repeatable checksum given a set of inputs from a
generic tar archive. Types of transportation include distribution to and from a
registry endpoint, saving and loading through commands or Docker daemon APIs,
transferring the build context from client to Docker daemon, and committing the
filesystem of a container to become an image.
As tar archives are used for transit, but not preserved in many situations, the
focus of the algorithm is to ensure the integrity of the preserved filesystem,
while maintaining a deterministic accountability. This means neither
constraining the ordering or manipulation of the files during the creation or
unpacking of the archive, nor including additional metadata state about the
filesystem attributes.
## Intended Audience
This document is outlining the methods used for consistent checksum calculation
for filesystems transported via tar archives.
Auditing these methodologies is an open and iterative process. This document
should accommodate the review of source code. Ultimately, this document should
be the starting point of further refinements to the algorithm and its future
versions.
## Concept
The checksum mechanism must ensure the integrity and assurance of the
filesystem payload.
## Checksum Algorithm Profile
A checksum mechanism must define the following operations and attributes:
* Associated hashing cipher - used to checksum each file payload and attribute
information.
* Checksum list - each file of the filesystem archive has its checksum
calculated from the payload and attributes of the file. The final checksum is
calculated from this list, with specific ordering.
* Version - as the algorithm adapts to requirements, its behaviors are managed
by versioning.
* Archive being calculated - the tar archive having its checksum calculated
## Elements of TarSum checksum
The calculated sum output is a text string. The elements included in the output
of the calculated sum comprise the information needed for validation of the sum
(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
form.
There are two delimiters used:
* '+' separates TarSum version from hashing cipher
* ':' separates calculation mechanics from expected hash
Example:
```
"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
| | \ |
| | \ |
|_version_|_cipher__|__ |
| \ |
|_calculation_mechanics_|______________________expected_sum_______________________|
```
## Versioning
Versioning was introduced [0] to accommodate differences in calculation needed,
and the ability to maintain backward compatibility.
The general algorithm is described further in the 'Calculation' section.
### Version0
This is the initial version of TarSum.
Its element in the TarSum checksum string is `tarsum`.
### Version1
Its element in the TarSum checksum is `tarsum.v1`.
The notable changes in this version:
* Exclusion of file `mtime` from the file information headers, in each file
checksum calculation
* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.` prefixed pax
tar file info headers) keys and values in each file checksum calculation
### VersionDev
*Do not use unless validating refinements to the checksum algorithm*
Its element in the TarSum checksum is `tarsum.dev`.
This is a floating placeholder for the next version and grounds for testing
changes. The methods used for calculation are subject to change without notice,
and this version is for testing and not for production use.
## Ciphers
The official default and standard hashing cipher used in the calculation mechanic
is `sha256`. This refers to the SHA-256 hash algorithm as defined in FIPS 180-4.
Though the TarSum algorithm itself is not exclusively bound to the single
hashing cipher `sha256`, support for alternate hashing ciphers was later added
[1]. Use cases for alternate ciphers could include future-proofing the TarSum
checksum format and using faster cipher hashes for tar filesystem checksums.
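As a sketch of how a cipher label might map to a hash implementation (only
`sha256` is the documented standard; the `sha512` case below is an assumption
for illustration):
```
package tarsumsketch

import (
	"crypto/sha256"
	"crypto/sha512"
	"fmt"
	"hash"
)

// newHashForCipher is a hypothetical selector from cipher label to hash
// constructor.
func newHashForCipher(name string) (hash.Hash, error) {
	switch name {
	case "sha256":
		return sha256.New(), nil
	case "sha512":
		return sha512.New(), nil
	default:
		return nil, fmt.Errorf("unsupported cipher %q", name)
	}
}
```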
## Calculation
### Requirement
As mentioned earlier, the calculation takes into consideration the lifecycle of
the tar archive, which is not an immutable, permanent artifact. Otherwise,
options like relying on a known hashing cipher checksum of the archive itself
would be reliable enough. The tar archive of the filesystem is used as a
transportation medium for Docker images, and the archive is discarded once its
contents are extracted. Therefore, items relevant to consistent validation,
such as the order of files in the tar archive and timestamps, are subject to
change once an image is received.
### Process
The method is typically iterative due to reading tar info headers from the
archive stream, though this is not a strict requirement.
#### Files
Each file in the tar archive has its contents (headers and body) checksummed
individually using the designated associated hashing cipher. The ordered
headers of the file are written to the checksum calculation first, and then the
payload of the file body.
The resulting checksum of the file is appended to the list of file sums. The
sum is encoded as a string of the hexadecimal digest. Additionally, the file
name and position in the archive are kept as a reference for special ordering.
#### Headers
The following headers are read, in this order (with the corresponding
representation of each value):
* 'name' - string
* 'mode' - string of the base10 integer
* 'uid' - string of the integer
* 'gid' - string of the integer
* 'size' - string of the integer
* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC
* 'typeflag' - string of the char
* 'linkname' - string
* 'uname' - string
* 'gname' - string
* 'devmajor' - string of the integer
* 'devminor' - string of the integer
For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
headers) are included after the above list. These xattr key/value pairs are
first sorted by key.
#### Header Format
The ordered headers are written to the hash in the format of
"{.key}{.value}"
with no newline.
#### Body
After the ordered headers of the file have been added to the checksum for the
file, the body of the file is written to the hash.
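Putting the headers, header format, and body rules together, the following is a
minimal sketch of a per-file sum using `sha256`; `selectHeaders` stands in for
a version-specific header selector such as the ones defined later in the
`tarsum` package:
```
package tarsumsketch

import (
	"archive/tar"
	"crypto/sha256"
	"encoding/hex"
	"io"
)

// fileSum sketches the per-file checksum: the ordered headers are written
// first as "{key}{value}" pairs with no separator, then the raw file body.
func fileSum(h *tar.Header, body io.Reader, selectHeaders func(*tar.Header) [][2]string) (string, error) {
	hsh := sha256.New()
	for _, kv := range selectHeaders(h) {
		hsh.Write([]byte(kv[0] + kv[1]))
	}
	if _, err := io.Copy(hsh, body); err != nil {
		return "", err
	}
	return hex.EncodeToString(hsh.Sum(nil)), nil
}
```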
#### List of file sums
The list of file sums is sorted by the string of the hexadecimal digest.
If there are two files in the tar with matching paths, the order of occurrence
for that path is reflected for the sums of the corresponding file header and
body.
#### Final Checksum
Begin with a fresh or initial state of the associated hash cipher. If there is
additional payload to include in the TarSum calculation for the archive, it is
written first. Then each checksum from the ordered list of file sums is written
to the hash.
The resulting digest is formatted per the Elements of TarSum checksum,
including the TarSum version, the associated hash cipher and the hexadecimal
encoded checksum digest.
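Under the same assumptions, here is a sketch of this final step; note that it
omits the duplicate-path ordering rule described under Security Considerations
below:
```
package tarsumsketch

import (
	"crypto/sha256"
	"encoding/hex"
	"sort"
)

// finalSum sketches the last step: the per-file hex sums are sorted, any
// extra payload is written first, then each sum, and the result is rendered
// as "{version}+{cipher}:{hex}".
func finalSum(fileSums []string, extra []byte) string {
	sorted := append([]string(nil), fileSums...)
	sort.Strings(sorted)
	h := sha256.New()
	if len(extra) > 0 {
		h.Write(extra)
	}
	for _, s := range sorted {
		h.Write([]byte(s))
	}
	return "tarsum.v1+sha256:" + hex.EncodeToString(h.Sum(nil))
}
```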
## Security Considerations
The initial version of TarSum has undergone one update that could invalidate
handcrafted tar archives. The tar archive format supports appending files with
the same names as prior files in the archive; the latter file will clobber the
prior file of the same path. Due to this, the algorithm now accounts for files
with matching paths, and orders the list of file sums accordingly [3].
## Footnotes
* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
## Acknowledgments
Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the
TarSum calculation.


@@ -0,0 +1,158 @@
package tarsum // import "github.com/docker/docker/pkg/tarsum"
import (
"archive/tar"
"errors"
"io"
"sort"
"strconv"
"strings"
)
// Version is used for versioning of the TarSum algorithm
// based on the prefix of the hash used
// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
type Version int
// Prefix of "tarsum"
const (
Version0 Version = iota
Version1
// VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation
VersionDev
)
// WriteV1Header writes a tar header to a writer in V1 tarsum format.
func WriteV1Header(h *tar.Header, w io.Writer) {
for _, elem := range v1TarHeaderSelect(h) {
w.Write([]byte(elem[0] + elem[1]))
}
}
// VersionLabelForChecksum returns the label for the given tarsum
// checksum, i.e., everything before the first `+` character in
// the string or an empty string if no label separator is found.
func VersionLabelForChecksum(checksum string) string {
// Checksums are in the form: {versionLabel}+{hashID}:{hex}
sepIndex := strings.Index(checksum, "+")
if sepIndex < 0 {
return ""
}
return checksum[:sepIndex]
}
// GetVersions gets a list of all known tarsum versions.
func GetVersions() []Version {
v := []Version{}
for k := range tarSumVersions {
v = append(v, k)
}
return v
}
var (
tarSumVersions = map[Version]string{
Version0: "tarsum",
Version1: "tarsum.v1",
VersionDev: "tarsum.dev",
}
tarSumVersionsByName = map[string]Version{
"tarsum": Version0,
"tarsum.v1": Version1,
"tarsum.dev": VersionDev,
}
)
func (tsv Version) String() string {
return tarSumVersions[tsv]
}
// GetVersionFromTarsum returns the Version from the provided string.
func GetVersionFromTarsum(tarsum string) (Version, error) {
tsv := tarsum
if strings.Contains(tarsum, "+") {
tsv = strings.SplitN(tarsum, "+", 2)[0]
}
for v, s := range tarSumVersions {
if s == tsv {
return v, nil
}
}
return -1, ErrNotVersion
}
// Errors that may be returned by functions in this package
var (
ErrNotVersion = errors.New("string does not include a TarSum Version")
ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
)
// tarHeaderSelector is the interface which different versions
// of tarsum should use for selecting and ordering tar headers
// for each item in the archive.
type tarHeaderSelector interface {
selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
}
type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
return f(h)
}
func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
return [][2]string{
{"name", h.Name},
{"mode", strconv.FormatInt(h.Mode, 10)},
{"uid", strconv.Itoa(h.Uid)},
{"gid", strconv.Itoa(h.Gid)},
{"size", strconv.FormatInt(h.Size, 10)},
{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
{"typeflag", string([]byte{h.Typeflag})},
{"linkname", h.Linkname},
{"uname", h.Uname},
{"gname", h.Gname},
{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
{"devminor", strconv.FormatInt(h.Devminor, 10)},
}
}
func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
// Get extended attributes.
xAttrKeys := make([]string, 0, len(h.Xattrs)) // zero length, so append does not leave empty keys at the front
for k := range h.Xattrs {
xAttrKeys = append(xAttrKeys, k)
}
sort.Strings(xAttrKeys)
// Make the slice with enough capacity to hold the 11 basic headers
// we want from the v0 selector plus however many xattrs we have.
orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
// Copy all headers from v0 excluding the 'mtime' header (at index 5).
v0headers := v0TarHeaderSelect(h)
orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
orderedHeaders = append(orderedHeaders, v0headers[6:]...)
// Finally, append the sorted xattrs.
for _, k := range xAttrKeys {
orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
}
return
}
var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
Version0: v0TarHeaderSelect,
Version1: v1TarHeaderSelect,
VersionDev: v1TarHeaderSelect,
}
func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
headerSelector, ok := registeredHeaderSelectors[v]
if !ok {
return nil, ErrVersionNotImplemented
}
return headerSelector, nil
}


@@ -0,0 +1,22 @@
package tarsum // import "github.com/docker/docker/pkg/tarsum"
import (
"io"
)
type writeCloseFlusher interface {
io.WriteCloser
Flush() error
}
type nopCloseFlusher struct {
io.Writer
}
func (n *nopCloseFlusher) Close() error {
return nil
}
func (n *nopCloseFlusher) Flush() error {
return nil
}

295
vendor/github.com/docker/docker/registry/auth.go generated vendored Normal file

@@ -0,0 +1,295 @@
package registry // import "github.com/docker/docker/registry"
import (
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/auth/challenge"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// AuthClientID is the ClientID used for the token server
AuthClientID = "docker"
)
// loginV1 tries to register/login to the v1 registry server.
func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) {
registryEndpoint := apiEndpoint.ToV1Endpoint(userAgent, nil)
serverAddress := registryEndpoint.String()
logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress)
if serverAddress == "" {
return "", "", errdefs.System(errors.New("server Error: Server Address not set"))
}
req, err := http.NewRequest(http.MethodGet, serverAddress+"users/", nil)
if err != nil {
return "", "", err
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
resp, err := registryEndpoint.client.Do(req)
if err != nil {
// fallback when request could not be completed
return "", "", fallbackError{
err: err,
}
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", "", errdefs.System(err)
}
switch resp.StatusCode {
case http.StatusOK:
return "Login Succeeded", "", nil
case http.StatusUnauthorized:
return "", "", errdefs.Unauthorized(errors.New("Wrong login/password, please try again"))
case http.StatusForbidden:
// *TODO: Use registry configuration to determine what this says, if anything?
return "", "", errdefs.Forbidden(errors.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress))
case http.StatusInternalServerError:
logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
return "", "", errdefs.System(errors.New("Internal Server Error"))
}
return "", "", errdefs.System(errors.Errorf("Login: %s (Code: %d; Headers: %s)", body,
resp.StatusCode, resp.Header))
}
type loginCredentialStore struct {
authConfig *types.AuthConfig
}
func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
return lcs.authConfig.Username, lcs.authConfig.Password
}
func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
return lcs.authConfig.IdentityToken
}
func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
lcs.authConfig.IdentityToken = token
}
type staticCredentialStore struct {
auth *types.AuthConfig
}
// NewStaticCredentialStore returns a credential store
// which always returns the same credential values.
func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore {
return staticCredentialStore{
auth: auth,
}
}
func (scs staticCredentialStore) Basic(*url.URL) (string, string) {
if scs.auth == nil {
return "", ""
}
return scs.auth.Username, scs.auth.Password
}
func (scs staticCredentialStore) RefreshToken(*url.URL, string) string {
if scs.auth == nil {
return ""
}
return scs.auth.IdentityToken
}
func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) {
}
type fallbackError struct {
err error
}
func (err fallbackError) Error() string {
return err.err.Error()
}
// loginV2 tries to login to the v2 registry server. The given registry
// endpoint will be pinged to get authorization challenges. These challenges
// will be used to authenticate against the registry to validate credentials.
func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/")
modifiers := Headers(userAgent, nil)
authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
credentialAuthConfig := *authConfig
creds := loginCredentialStore{
authConfig: &credentialAuthConfig,
}
loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
if err != nil {
return "", "", err
}
endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
if err != nil {
if !foundV2 {
err = fallbackError{err: err}
}
return "", "", err
}
resp, err := loginClient.Do(req)
if err != nil {
err = translateV2AuthError(err)
if !foundV2 {
err = fallbackError{err: err}
}
return "", "", err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
return "Login Succeeded", credentialAuthConfig.IdentityToken, nil
}
// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
if !foundV2 {
err = fallbackError{err: err}
}
return "", "", err
}
func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) {
challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport)
if err != nil {
if !foundV2 {
err = fallbackError{err: err}
}
return nil, foundV2, err
}
tokenHandlerOptions := auth.TokenHandlerOptions{
Transport: authTransport,
Credentials: creds,
OfflineAccess: true,
ClientID: AuthClientID,
Scopes: scopes,
}
tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
basicHandler := auth.NewBasicHandler(creds)
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
tr := transport.NewTransport(authTransport, modifiers...)
return &http.Client{
Transport: tr,
Timeout: 15 * time.Second,
}, foundV2, nil
}
// ConvertToHostname converts a registry URL which has http|https prepended
// to just a hostname.
func ConvertToHostname(url string) string {
stripped := url
if strings.HasPrefix(url, "http://") {
stripped = strings.TrimPrefix(url, "http://")
} else if strings.HasPrefix(url, "https://") {
stripped = strings.TrimPrefix(url, "https://")
}
nameParts := strings.SplitN(stripped, "/", 2)
return nameParts[0]
}
// ResolveAuthConfig matches an auth configuration to a server address or a URL
func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig {
configKey := GetAuthConfigKey(index)
// First try the happy case
if c, found := authConfigs[configKey]; found || index.Official {
return c
}
// Maybe they have a legacy config file, we will iterate the keys converting
// them to the new format and testing
for registry, ac := range authConfigs {
if configKey == ConvertToHostname(registry) {
return ac
}
}
// When all else fails, return an empty auth config
return types.AuthConfig{}
}
// PingResponseError is used when the response from a ping
// was received but invalid.
type PingResponseError struct {
Err error
}
func (err PingResponseError) Error() string {
return err.Err.Error()
}
// PingV2Registry attempts to ping a v2 registry and on success return a
// challenge manager for the supported authentication types and
// whether v2 was confirmed by the response. If a response is received but
// cannot be interpreted a PingResponseError will be returned.
func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) {
var (
foundV2 = false
v2Version = auth.APIVersion{
Type: "registry",
Version: "2.0",
}
)
pingClient := &http.Client{
Transport: transport,
Timeout: 15 * time.Second,
}
endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/"
req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
if err != nil {
return nil, false, err
}
resp, err := pingClient.Do(req)
if err != nil {
return nil, false, err
}
defer resp.Body.Close()
versions := auth.APIVersions(resp, DefaultRegistryVersionHeader)
for _, pingVersion := range versions {
if pingVersion == v2Version {
// The version header indicates we're definitely
// talking to a v2 registry. So don't allow future
// fallbacks to the v1 protocol.
foundV2 = true
break
}
}
challengeManager := challenge.NewSimpleManager()
if err := challengeManager.AddResponse(resp); err != nil {
return nil, foundV2, PingResponseError{
Err: err,
}
}
return challengeManager, foundV2, nil
}

436
vendor/github.com/docker/docker/registry/config.go generated vendored Normal file

@@ -0,0 +1,436 @@
package registry // import "github.com/docker/docker/registry"
import (
"fmt"
"net"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/docker/distribution/reference"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ServiceOptions holds command line options.
type ServiceOptions struct {
AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"`
Mirrors []string `json:"registry-mirrors,omitempty"`
InsecureRegistries []string `json:"insecure-registries,omitempty"`
}
// serviceConfig holds daemon configuration for the registry service.
type serviceConfig struct {
registrytypes.ServiceConfig
}
var (
// DefaultNamespace is the default namespace
DefaultNamespace = "docker.io"
// DefaultRegistryVersionHeader is the name of the default HTTP header
// that carries Registry version info
DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version"
// IndexHostname is the index hostname
IndexHostname = "index.docker.io"
// IndexServer is used for user auth and image search
IndexServer = "https://" + IndexHostname + "/v1/"
// IndexName is the name of the index
IndexName = "docker.io"
// DefaultV2Registry is the URI of the default v2 registry
DefaultV2Registry = &url.URL{
Scheme: "https",
Host: "registry-1.docker.io",
}
)
var (
// ErrInvalidRepositoryName is an error returned if the repository name did
// not have the correct form
ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
emptyServiceConfig, _ = newServiceConfig(ServiceOptions{})
)
var (
validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`)
)
// for mocking in unit tests
var lookupIP = net.LookupIP
// newServiceConfig returns a new instance of ServiceConfig
func newServiceConfig(options ServiceOptions) (*serviceConfig, error) {
config := &serviceConfig{
ServiceConfig: registrytypes.ServiceConfig{
InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0),
IndexConfigs: make(map[string]*registrytypes.IndexInfo),
// Hack: Bypass setting the mirrors to IndexConfigs since they are going away
// and Mirrors are only for the official registry anyways.
},
}
if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil {
return nil, err
}
if err := config.LoadMirrors(options.Mirrors); err != nil {
return nil, err
}
if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil {
return nil, err
}
return config, nil
}
// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config.
func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error {
cidrs := map[string]*registrytypes.NetIPNet{}
hostnames := map[string]bool{}
for _, r := range registries {
if _, err := ValidateIndexName(r); err != nil {
return err
}
if validateNoScheme(r) != nil {
return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r)
}
if _, ipnet, err := net.ParseCIDR(r); err == nil {
// Valid CIDR.
cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet)
} else if err := validateHostPort(r); err == nil {
// Must be `host:port` if not CIDR.
hostnames[r] = true
} else {
return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err)
}
}
config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0)
for _, c := range cidrs {
config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c)
}
config.AllowNondistributableArtifactsHostnames = make([]string, 0)
for h := range hostnames {
config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h)
}
return nil
}
// LoadMirrors loads mirrors to config, after removing duplicates.
// Returns an error if mirrors contains an invalid mirror.
func (config *serviceConfig) LoadMirrors(mirrors []string) error {
mMap := map[string]struct{}{}
unique := []string{}
for _, mirror := range mirrors {
m, err := ValidateMirror(mirror)
if err != nil {
return err
}
if _, exist := mMap[m]; !exist {
mMap[m] = struct{}{}
unique = append(unique, m)
}
}
config.Mirrors = unique
// Configure public registry since mirrors may have changed.
config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
Name: IndexName,
Mirrors: config.Mirrors,
Secure: true,
Official: true,
}
return nil
}
// LoadInsecureRegistries loads insecure registries to config
func (config *serviceConfig) LoadInsecureRegistries(registries []string) error {
// Localhost is by default considered as an insecure registry
// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
//
// TODO: should we deprecate this once it is easier for people to set up a TLS registry or change
// daemon flags on boot2docker?
registries = append(registries, "127.0.0.0/8")
// Store original InsecureRegistryCIDRs and IndexConfigs
// Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info.
originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs
originalIndexInfos := config.ServiceConfig.IndexConfigs
config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0)
config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo)
skip:
for _, r := range registries {
// validate insecure registry
if _, err := ValidateIndexName(r); err != nil {
// before returning err, roll back to original data
config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
config.ServiceConfig.IndexConfigs = originalIndexInfos
return err
}
if strings.HasPrefix(strings.ToLower(r), "http://") {
logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r)
r = r[7:]
} else if strings.HasPrefix(strings.ToLower(r), "https://") {
logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r)
r = r[8:]
} else if validateNoScheme(r) != nil {
// Insecure registry should not contain '://'
// before returning err, roll back to original data
config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
config.ServiceConfig.IndexConfigs = originalIndexInfos
return fmt.Errorf("insecure registry %s should not contain '://'", r)
}
// Check if CIDR was passed to --insecure-registry
_, ipnet, err := net.ParseCIDR(r)
if err == nil {
// Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip.
data := (*registrytypes.NetIPNet)(ipnet)
for _, value := range config.InsecureRegistryCIDRs {
if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() {
continue skip
}
}
// ipnet is not found, add it in config.InsecureRegistryCIDRs
config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data)
} else {
if err := validateHostPort(r); err != nil {
config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
config.ServiceConfig.IndexConfigs = originalIndexInfos
return fmt.Errorf("insecure registry %s is not valid: %v", r, err)
}
// Assume `host:port` if not CIDR.
config.IndexConfigs[r] = &registrytypes.IndexInfo{
Name: r,
Mirrors: make([]string, 0),
Secure: false,
Official: false,
}
}
}
// Configure public registry.
config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
Name: IndexName,
Mirrors: config.Mirrors,
Secure: true,
Official: true,
}
return nil
}
// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries
// that allow push of nondistributable artifacts.
//
// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP
// of the registry specified by hostname, true is returned.
//
// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If
// resolution fails, CIDR matching is not performed.
func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool {
for _, h := range config.AllowNondistributableArtifactsHostnames {
if h == hostname {
return true
}
}
return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname)
}
// isSecureIndex returns false if the provided indexName is part of the list of insecure registries
// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs.
//
// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet.
// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered
// insecure.
//
// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained
// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element
// of insecureRegistries.
func isSecureIndex(config *serviceConfig, indexName string) bool {
// Check for configured index, first. This is needed in case isSecureIndex
// is called from anything besides newIndexInfo, in order to honor per-index configurations.
if index, ok := config.IndexConfigs[indexName]; ok {
return index.Secure
}
return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName)
}
// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`)
// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be
// resolved to IP addresses for matching. If resolution fails, false is returned.
func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool {
host, _, err := net.SplitHostPort(URLHost)
if err != nil {
// Assume URLHost is of the form `host` without the port and go on.
host = URLHost
}
addrs, err := lookupIP(host)
if err != nil {
ip := net.ParseIP(host)
if ip != nil {
addrs = []net.IP{ip}
}
// if ip == nil, then `host` is neither an IP nor it could be looked up,
// either because the index is unreachable, or because the index is behind an HTTP proxy.
// So, len(addrs) == 0 and we're not aborting.
}
// Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined.
for _, addr := range addrs {
for _, ipnet := range cidrs {
// check if the addr falls in the subnet
if (*net.IPNet)(ipnet).Contains(addr) {
return true
}
}
}
return false
}
// ValidateMirror validates an HTTP(S) registry mirror
func ValidateMirror(val string) (string, error) {
uri, err := url.Parse(val)
if err != nil {
return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val)
}
if uri.Scheme != "http" && uri.Scheme != "https" {
return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri)
}
if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" {
return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri)
}
if uri.User != nil {
// strip password from output
uri.User = url.UserPassword(uri.User.Username(), "xxxxx")
return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri)
}
return strings.TrimSuffix(val, "/") + "/", nil
}
// ValidateIndexName validates an index name.
func ValidateIndexName(val string) (string, error) {
// TODO: upstream this to check to reference package
if val == "index.docker.io" {
val = "docker.io"
}
if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
return "", fmt.Errorf("invalid index name (%s). Cannot begin or end with a hyphen", val)
}
return val, nil
}
func validateNoScheme(reposName string) error {
if strings.Contains(reposName, "://") {
// It cannot contain a scheme!
return ErrInvalidRepositoryName
}
return nil
}
func validateHostPort(s string) error {
// Split host and port; in case s cannot be split, assume host only
host, port, err := net.SplitHostPort(s)
if err != nil {
host = s
port = ""
}
// If match against the `host:port` pattern fails,
// it might be `IPv6:port`, which will be captured by net.ParseIP(host)
if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil {
return fmt.Errorf("invalid host %q", host)
}
if port != "" {
v, err := strconv.Atoi(port)
if err != nil {
return err
}
if v < 0 || v > 65535 {
return fmt.Errorf("invalid port %q", port)
}
}
return nil
}
// newIndexInfo returns IndexInfo configuration from indexName
func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) {
var err error
indexName, err = ValidateIndexName(indexName)
if err != nil {
return nil, err
}
// Return any configured index info, first.
if index, ok := config.IndexConfigs[indexName]; ok {
return index, nil
}
// Construct a non-configured index info.
index := &registrytypes.IndexInfo{
Name: indexName,
Mirrors: make([]string, 0),
Official: false,
}
index.Secure = isSecureIndex(config, indexName)
return index, nil
}
// GetAuthConfigKey special-cases using the full index address of the official
// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
func GetAuthConfigKey(index *registrytypes.IndexInfo) string {
if index.Official {
return IndexServer
}
return index.Name
}
// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo
func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) {
index, err := newIndexInfo(config, reference.Domain(name))
if err != nil {
return nil, err
}
official := !strings.ContainsRune(reference.FamiliarName(name), '/')
return &RepositoryInfo{
Name: reference.TrimNamed(name),
Index: index,
Official: official,
}, nil
}
// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but
// lacks registry configuration.
func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) {
return newRepositoryInfo(emptyServiceConfig, reposName)
}
// ParseSearchIndexInfo will use repository name to get back an indexInfo.
func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) {
indexName, _ := splitReposSearchTerm(reposName)
indexInfo, err := newIndexInfo(emptyServiceConfig, indexName)
if err != nil {
return nil, err
}
return indexInfo, nil
}


@@ -0,0 +1,16 @@
// +build !windows
package registry // import "github.com/docker/docker/registry"
var (
// CertsDir is the directory where certificates are stored
CertsDir = "/etc/docker/certs.d"
)
// cleanPath is used to ensure that a directory name is valid on the target
// platform. It will be passed in something *similar* to a URL such as
// https:/index.docker.io/v1. Not all platforms support directory names
// which contain those characters (such as : on Windows)
func cleanPath(s string) string {
return s
}


@@ -0,0 +1,18 @@
package registry // import "github.com/docker/docker/registry"
import (
"os"
"path/filepath"
"strings"
)
// CertsDir is the directory where certificates are stored
var CertsDir = os.Getenv("programdata") + `\docker\certs.d`
// cleanPath is used to ensure that a directory name is valid on the target
// platform. It will be passed in something *similar* to a URL such as
// https:\index.docker.io\v1. Not all platforms support directory names
// which contain those characters (such as : on Windows)
func cleanPath(s string) string {
return filepath.FromSlash(strings.Replace(s, ":", "", -1))
}

195
vendor/github.com/docker/docker/registry/endpoint_v1.go generated vendored Normal file

@@ -0,0 +1,195 @@
package registry // import "github.com/docker/docker/registry"
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/docker/distribution/registry/client/transport"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/sirupsen/logrus"
)
// V1Endpoint stores basic information about a V1 registry endpoint.
type V1Endpoint struct {
client *http.Client
URL *url.URL
IsSecure bool
}
// NewV1Endpoint parses the given address to return a registry endpoint.
func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
tlsConfig, err := newTLSConfig(index.Name, index.Secure)
if err != nil {
return nil, err
}
endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders)
if err != nil {
return nil, err
}
if err := validateEndpoint(endpoint); err != nil {
return nil, err
}
return endpoint, nil
}
func validateEndpoint(endpoint *V1Endpoint) error {
logrus.Debugf("pinging registry endpoint %s", endpoint)
// Try HTTPS ping to registry
endpoint.URL.Scheme = "https"
if _, err := endpoint.Ping(); err != nil {
if endpoint.IsSecure {
// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP.
return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
}
// If registry is insecure and HTTPS failed, fallback to HTTP.
logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
endpoint.URL.Scheme = "http"
var err2 error
if _, err2 = endpoint.Ping(); err2 == nil {
return nil
}
return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
}
return nil
}
func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint {
endpoint := &V1Endpoint{
IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify,
URL: new(url.URL),
}
*endpoint.URL = address
// TODO(tiborvass): make sure a ConnectTimeout transport is used
tr := NewTransport(tlsConfig)
endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...))
return endpoint
}
// trimV1Address trims the version off the address and returns the
// trimmed address or an error if there is a non-V1 version.
func trimV1Address(address string) (string, error) {
var (
chunks []string
apiVersionStr string
)
if strings.HasSuffix(address, "/") {
address = address[:len(address)-1]
}
chunks = strings.Split(address, "/")
apiVersionStr = chunks[len(chunks)-1]
if apiVersionStr == "v1" {
return strings.Join(chunks[:len(chunks)-1], "/"), nil
}
for k, v := range apiVersions {
if k != APIVersion1 && apiVersionStr == v {
return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr)
}
}
return address, nil
}
func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") {
address = "https://" + address
}
address, err := trimV1Address(address)
if err != nil {
return nil, err
}
uri, err := url.Parse(address)
if err != nil {
return nil, err
}
endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders)
return endpoint, nil
}
// Get the formatted URL for the root of this registry Endpoint
func (e *V1Endpoint) String() string {
return e.URL.String() + "/v1/"
}
// Path returns a formatted string for the URL
// of this endpoint with the given path appended.
func (e *V1Endpoint) Path(path string) string {
return e.URL.String() + "/v1/" + path
}
// Ping returns a PingResult which indicates whether the registry is standalone or not.
func (e *V1Endpoint) Ping() (PingResult, error) {
logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
if e.String() == IndexServer {
// Skip the check, we know this one is valid
// (and we never want to fallback to http in case of error)
return PingResult{Standalone: false}, nil
}
req, err := http.NewRequest(http.MethodGet, e.Path("_ping"), nil)
if err != nil {
return PingResult{Standalone: false}, err
}
resp, err := e.client.Do(req)
if err != nil {
return PingResult{Standalone: false}, err
}
defer resp.Body.Close()
jsonString, err := ioutil.ReadAll(resp.Body)
if err != nil {
return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
}
// If the header is absent, we assume true for compatibility with earlier
// versions of the registry, i.e. default to Standalone: true.
info := PingResult{
Standalone: true,
}
if err := json.Unmarshal(jsonString, &info); err != nil {
logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err)
// don't stop here. Just assume sane defaults
}
if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
logrus.Debugf("Registry version header: '%s'", hdr)
info.Version = hdr
}
logrus.Debugf("PingResult.Version: %q", info.Version)
standalone := resp.Header.Get("X-Docker-Registry-Standalone")
logrus.Debugf("Registry standalone header: '%s'", standalone)
// Accepted values are "true" (case-insensitive) and "1".
if strings.EqualFold(standalone, "true") || standalone == "1" {
info.Standalone = true
} else if len(standalone) > 0 {
// there is a header set, and it is not "true" or "1", so assume false
info.Standalone = false
}
logrus.Debugf("PingResult.Standalone: %t", info.Standalone)
return info, nil
}

31
vendor/github.com/docker/docker/registry/errors.go generated vendored Normal file

@@ -0,0 +1,31 @@
package registry // import "github.com/docker/docker/registry"
import (
"net/url"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/docker/errdefs"
)
type notFoundError string
func (e notFoundError) Error() string {
return string(e)
}
func (notFoundError) NotFound() {}
func translateV2AuthError(err error) error {
switch e := err.(type) {
case *url.Error:
switch e2 := e.Err.(type) {
case errcode.Error:
switch e2.Code {
case errcode.ErrorCodeUnauthorized:
return errdefs.Unauthorized(err)
}
}
}
return err
}

210
vendor/github.com/docker/docker/registry/registry.go generated vendored Normal file

@@ -0,0 +1,210 @@
// Package registry contains client primitives to interact with a remote Docker registry.
package registry // import "github.com/docker/docker/registry"
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/rootless"
"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
)
var (
// ErrAlreadyExists is an error returned if an image being pushed
// already exists on the remote side
ErrAlreadyExists = errors.New("Image already exists")
)
// HostCertsDir returns the config directory for a specific host
func HostCertsDir(hostname string) (string, error) {
certsDir := CertsDir
if rootless.RunningWithRootlessKit() {
configHome, err := homedir.GetConfigHome()
if err != nil {
return "", err
}
certsDir = filepath.Join(configHome, "docker/certs.d")
}
hostDir := filepath.Join(certsDir, cleanPath(hostname))
return hostDir, nil
}
func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
// PreferredServerCipherSuites should have no effect
tlsConfig := tlsconfig.ServerDefault()
tlsConfig.InsecureSkipVerify = !isSecure
if isSecure && CertsDir != "" {
hostDir, err := HostCertsDir(hostname)
if err != nil {
return nil, err
}
logrus.Debugf("hostDir: %s", hostDir)
if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil {
return nil, err
}
}
return tlsConfig, nil
}
func hasFile(files []os.FileInfo, name string) bool {
for _, f := range files {
if f.Name() == name {
return true
}
}
return false
}
// ReadCertsDirectory reads the directory for TLS certificates
// including roots and certificate pairs and updates the
// provided TLS configuration.
func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
fs, err := ioutil.ReadDir(directory)
if err != nil && !os.IsNotExist(err) {
return err
}
for _, f := range fs {
if strings.HasSuffix(f.Name(), ".crt") {
if tlsConfig.RootCAs == nil {
systemPool, err := tlsconfig.SystemCertPool()
if err != nil {
return fmt.Errorf("unable to get system cert pool: %v", err)
}
tlsConfig.RootCAs = systemPool
}
logrus.Debugf("crt: %s", filepath.Join(directory, f.Name()))
data, err := ioutil.ReadFile(filepath.Join(directory, f.Name()))
if err != nil {
return err
}
tlsConfig.RootCAs.AppendCertsFromPEM(data)
}
if strings.HasSuffix(f.Name(), ".cert") {
certName := f.Name()
keyName := certName[:len(certName)-5] + ".key"
logrus.Debugf("cert: %s", filepath.Join(directory, f.Name()))
if !hasFile(fs, keyName) {
return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
}
cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName))
if err != nil {
return err
}
tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
}
if strings.HasSuffix(f.Name(), ".key") {
keyName := f.Name()
certName := keyName[:len(keyName)-4] + ".cert"
logrus.Debugf("key: %s", filepath.Join(directory, f.Name()))
if !hasFile(fs, certName) {
return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName)
}
}
}
return nil
}
// Headers returns request modifiers with a User-Agent and metaHeaders
func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier {
modifiers := []transport.RequestModifier{}
if userAgent != "" {
modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{
"User-Agent": []string{userAgent},
}))
}
if metaHeaders != nil {
modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
}
return modifiers
}
// HTTPClient returns an HTTP client structure which uses the given transport
// and contains the necessary headers for redirected requests
func HTTPClient(transport http.RoundTripper) *http.Client {
return &http.Client{
Transport: transport,
CheckRedirect: addRequiredHeadersToRedirectedRequests,
}
}
func trustedLocation(req *http.Request) bool {
var (
trusteds = []string{"docker.com", "docker.io"}
hostname = strings.SplitN(req.Host, ":", 2)[0]
)
if req.URL.Scheme != "https" {
return false
}
for _, trusted := range trusteds {
if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) {
return true
}
}
return false
}
// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers
// for redirected requests
func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error {
if len(via) != 0 && via[0] != nil {
if trustedLocation(req) && trustedLocation(via[0]) {
req.Header = via[0].Header
return nil
}
for k, v := range via[0].Header {
if k != "Authorization" {
for _, vv := range v {
req.Header.Add(k, vv)
}
}
}
}
return nil
}
// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the
// default TLS configuration.
func NewTransport(tlsConfig *tls.Config) *http.Transport {
if tlsConfig == nil {
tlsConfig = tlsconfig.ServerDefault()
}
direct := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}
base := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: direct.DialContext,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
DisableKeepAlives: true,
}
return base
}


@@ -0,0 +1,96 @@
package resumable // import "github.com/docker/docker/registry/resumable"
import (
"fmt"
"io"
"net/http"
"time"
"github.com/sirupsen/logrus"
)
type requestReader struct {
client *http.Client
request *http.Request
lastRange int64
totalSize int64
currentResponse *http.Response
failures uint32
maxFailures uint32
waitDuration time.Duration
}
// NewRequestReader makes it possible to resume reading a request's body transparently.
// maxfail is the number of times we retry making the request again (not resumes);
// totalsize is the total length of the body, auto-detected if not provided.
func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser {
return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second}
}
// NewRequestReaderWithInitialResponse makes it possible to resume
// reading the body of an already initiated request.
func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second}
}
func (r *requestReader) Read(p []byte) (n int, err error) {
if r.client == nil || r.request == nil {
return 0, fmt.Errorf("client and request can't be nil")
}
isFreshRequest := false
if r.lastRange != 0 && r.currentResponse == nil {
readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
r.request.Header.Set("Range", readRange)
time.Sleep(r.waitDuration)
}
if r.currentResponse == nil {
r.currentResponse, err = r.client.Do(r.request)
isFreshRequest = true
}
if err != nil && r.failures+1 != r.maxFailures {
r.cleanUpResponse()
r.failures++
time.Sleep(time.Duration(r.failures) * r.waitDuration)
return 0, nil
} else if err != nil {
r.cleanUpResponse()
return 0, err
}
if r.currentResponse.StatusCode == http.StatusRequestedRangeNotSatisfiable && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
r.cleanUpResponse()
return 0, io.EOF
} else if r.currentResponse.StatusCode != http.StatusPartialContent && r.lastRange != 0 && isFreshRequest {
r.cleanUpResponse()
return 0, fmt.Errorf("the server doesn't support byte ranges")
}
if r.totalSize == 0 {
r.totalSize = r.currentResponse.ContentLength
} else if r.totalSize <= 0 {
r.cleanUpResponse()
return 0, fmt.Errorf("failed to auto detect content length")
}
n, err = r.currentResponse.Body.Read(p)
r.lastRange += int64(n)
if err != nil {
r.cleanUpResponse()
}
if err != nil && err != io.EOF {
logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
err = nil
}
return n, err
}
func (r *requestReader) Close() error {
r.cleanUpResponse()
r.client = nil
r.request = nil
return nil
}
func (r *requestReader) cleanUpResponse() {
if r.currentResponse != nil {
r.currentResponse.Body.Close()
r.currentResponse = nil
}
}

313
vendor/github.com/docker/docker/registry/service.go generated vendored Normal file

@@ -0,0 +1,313 @@
package registry // import "github.com/docker/docker/registry"
import (
"context"
"crypto/tls"
"net/http"
"net/url"
"strings"
"sync"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// DefaultSearchLimit is the default value for maximum number of returned search results.
DefaultSearchLimit = 25
)
// Service is the interface defining what a registry service should implement.
type Service interface {
Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error)
LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error)
LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error)
ResolveRepository(name reference.Named) (*RepositoryInfo, error)
Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error)
ServiceConfig() *registrytypes.ServiceConfig
TLSConfig(hostname string) (*tls.Config, error)
LoadAllowNondistributableArtifacts([]string) error
LoadMirrors([]string) error
LoadInsecureRegistries([]string) error
}
// DefaultService is a registry service. It tracks configuration data such as a list
// of mirrors.
type DefaultService struct {
config *serviceConfig
mu sync.Mutex
}
// NewService returns a new instance of DefaultService ready to be
// installed into an engine.
func NewService(options ServiceOptions) (*DefaultService, error) {
config, err := newServiceConfig(options)
return &DefaultService{config: config}, err
}
// ServiceConfig returns the public registry service configuration.
func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig {
s.mu.Lock()
defer s.mu.Unlock()
servConfig := registrytypes.ServiceConfig{
AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0),
AllowNondistributableArtifactsHostnames: make([]string, 0),
InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0),
IndexConfigs: make(map[string]*(registrytypes.IndexInfo)),
Mirrors: make([]string, 0),
}
// construct a new ServiceConfig which will not retrieve s.Config directly,
// and look up items in s.config with mu locked
servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...)
servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...)
servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...)
for key, value := range s.config.ServiceConfig.IndexConfigs {
servConfig.IndexConfigs[key] = value
}
servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...)
return &servConfig
}
// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service.
func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error {
s.mu.Lock()
defer s.mu.Unlock()
return s.config.LoadAllowNondistributableArtifacts(registries)
}
// LoadMirrors loads registry mirrors for Service
func (s *DefaultService) LoadMirrors(mirrors []string) error {
s.mu.Lock()
defer s.mu.Unlock()
return s.config.LoadMirrors(mirrors)
}
// LoadInsecureRegistries loads insecure registries for Service
func (s *DefaultService) LoadInsecureRegistries(registries []string) error {
s.mu.Lock()
defer s.mu.Unlock()
return s.config.LoadInsecureRegistries(registries)
}
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) {
// TODO Use ctx when searching for repositories
serverAddress := authConfig.ServerAddress
if serverAddress == "" {
serverAddress = IndexServer
}
if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") {
serverAddress = "https://" + serverAddress
}
u, err := url.Parse(serverAddress)
if err != nil {
return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err))
}
endpoints, err := s.LookupPushEndpoints(u.Host)
if err != nil {
return "", "", errdefs.InvalidParameter(err)
}
for _, endpoint := range endpoints {
login := loginV2
if endpoint.Version == APIVersion1 {
login = loginV1
}
status, token, err = login(authConfig, endpoint, userAgent)
if err == nil {
return
}
if fErr, ok := err.(fallbackError); ok {
err = fErr.err
logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err)
continue
}
return "", "", err
}
return "", "", err
}
// splitReposSearchTerm breaks a search term into an index name and remote name
func splitReposSearchTerm(reposName string) (string, string) {
nameParts := strings.SplitN(reposName, "/", 2)
var indexName, remoteName string
if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
// 'docker.io'
indexName = IndexName
remoteName = reposName
} else {
indexName = nameParts[0]
remoteName = nameParts[1]
}
return indexName, remoteName
}
// Search queries the public registry for images matching the specified
// search terms, and returns the results.
func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
// TODO Use ctx when searching for repositories
if err := validateNoScheme(term); err != nil {
return nil, err
}
indexName, remoteName := splitReposSearchTerm(term)
// Search is a long-running operation; just lock s.config to avoid blocking others.
s.mu.Lock()
index, err := newIndexInfo(s.config, indexName)
s.mu.Unlock()
if err != nil {
return nil, err
}
// *TODO: Search multiple indexes.
endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers))
if err != nil {
return nil, err
}
var client *http.Client
if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" {
creds := NewStaticCredentialStore(authConfig)
scopes := []auth.Scope{
auth.RegistryScope{
Name: "catalog",
Actions: []string{"search"},
},
}
modifiers := Headers(userAgent, nil)
v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes)
if err != nil {
if fErr, ok := err.(fallbackError); ok {
logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err)
} else {
return nil, err
}
} else if foundV2 {
// Copy non transport http client features
v2Client.Timeout = endpoint.client.Timeout
v2Client.CheckRedirect = endpoint.client.CheckRedirect
v2Client.Jar = endpoint.client.Jar
logrus.Debugf("using v2 client for search to %s", endpoint.URL)
client = v2Client
}
}
if client == nil {
client = endpoint.client
if err := authorizeClient(client, authConfig, endpoint); err != nil {
return nil, err
}
}
r := newSession(client, authConfig, endpoint)
if index.Official {
localName := remoteName
if strings.HasPrefix(localName, "library/") {
// If pull "library/foo", it's stored locally under "foo"
localName = strings.SplitN(localName, "/", 2)[1]
}
return r.SearchRepositories(localName, limit)
}
return r.SearchRepositories(remoteName, limit)
}
// ResolveRepository splits a repository name into its components
// and configuration of the associated registry.
func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) {
s.mu.Lock()
defer s.mu.Unlock()
return newRepositoryInfo(s.config, name)
}
// APIEndpoint represents a remote API endpoint
type APIEndpoint struct {
Mirror bool
URL *url.URL
Version APIVersion
AllowNondistributableArtifacts bool
Official bool
TrimHostname bool
TLSConfig *tls.Config
}
// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint {
return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders)
}
// TLSConfig constructs a client TLS configuration based on server defaults
func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) {
s.mu.Lock()
defer s.mu.Unlock()
return newTLSConfig(hostname, isSecureIndex(s.config, hostname))
}
// tlsConfig constructs a client TLS configuration based on server defaults
func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) {
return newTLSConfig(hostname, isSecureIndex(s.config, hostname))
}
func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) {
return s.tlsConfig(mirrorURL.Host)
}
// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference.
// It gives preference to v2 endpoints over v1, mirrors over the actual
// registry, and HTTPS over plain HTTP.
func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
s.mu.Lock()
defer s.mu.Unlock()
return s.lookupEndpoints(hostname)
}
// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference.
// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP.
// Mirrors are not included.
func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
s.mu.Lock()
defer s.mu.Unlock()
allEndpoints, err := s.lookupEndpoints(hostname)
if err == nil {
for _, endpoint := range allEndpoints {
if !endpoint.Mirror {
endpoints = append(endpoints, endpoint)
}
}
}
return endpoints, err
}
func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
return s.lookupV2Endpoints(hostname)
}

82
vendor/github.com/docker/docker/registry/service_v2.go generated vendored Normal file

@@ -0,0 +1,82 @@
package registry // import "github.com/docker/docker/registry"
import (
"net/url"
"strings"
"github.com/docker/go-connections/tlsconfig"
)
func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
tlsConfig := tlsconfig.ServerDefault()
if hostname == DefaultNamespace || hostname == IndexHostname {
// v2 mirrors
for _, mirror := range s.config.Mirrors {
if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") {
mirror = "https://" + mirror
}
mirrorURL, err := url.Parse(mirror)
if err != nil {
return nil, err
}
mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL)
if err != nil {
return nil, err
}
endpoints = append(endpoints, APIEndpoint{
URL: mirrorURL,
// assume mirrors support the v2 API
Version: APIVersion2,
Mirror: true,
TrimHostname: true,
TLSConfig: mirrorTLSConfig,
})
}
// v2 registry
endpoints = append(endpoints, APIEndpoint{
URL: DefaultV2Registry,
Version: APIVersion2,
Official: true,
TrimHostname: true,
TLSConfig: tlsConfig,
})
return endpoints, nil
}
ana := allowNondistributableArtifacts(s.config, hostname)
tlsConfig, err = s.tlsConfig(hostname)
if err != nil {
return nil, err
}
endpoints = []APIEndpoint{
{
URL: &url.URL{
Scheme: "https",
Host: hostname,
},
Version: APIVersion2,
AllowNondistributableArtifacts: ana,
TrimHostname: true,
TLSConfig: tlsConfig,
},
}
if tlsConfig.InsecureSkipVerify {
endpoints = append(endpoints, APIEndpoint{
URL: &url.URL{
Scheme: "http",
Host: hostname,
},
Version: APIVersion2,
AllowNondistributableArtifacts: ana,
TrimHostname: true,
// used to check whether the endpoint was supposed to be secure, via InsecureSkipVerify
TLSConfig: tlsConfig,
})
}
return endpoints, nil
}
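// Endpoint-ordering sketch (illustrative, not part of the vendored source):
// with a daemon configured as Mirrors = []string{"mirror.example.com"}, a pull
// of a Docker Hub image resolves endpoints in this order:
//
//	eps, _ := s.lookupV2Endpoints(IndexHostname)
//	// eps[0].URL -> https://mirror.example.com (Mirror: true)
//	// eps[1].URL -> DefaultV2Registry          (Official: true)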

782
vendor/github.com/docker/docker/registry/session.go generated vendored Normal file

@@ -0,0 +1,782 @@
package registry // import "github.com/docker/docker/registry"
import (
"bytes"
"crypto/sha256"
// this is required for some certificates
_ "crypto/sha512"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
"strings"
"sync"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/registry/resumable"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
// ErrRepoNotFound is returned if the repository didn't exist on the
// remote side
ErrRepoNotFound notFoundError = "Repository not found"
)
// A Session is used to communicate with a V1 registry
type Session struct {
indexEndpoint *V1Endpoint
client *http.Client
// TODO(tiborvass): remove authConfig
authConfig *types.AuthConfig
id string
}
type authTransport struct {
http.RoundTripper
*types.AuthConfig
alwaysSetBasicAuth bool
token []string
mu sync.Mutex // guards modReq
modReq map[*http.Request]*http.Request // original -> modified
}
// AuthTransport handles the auth layer when communicating with a v1 registry (private or official)
//
// For private v1 registries, set alwaysSetBasicAuth to true.
//
// For the official v1 registry, if there isn't already an Authorization header in the request,
// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header.
// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing
// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent
// requests.
//
// If the server sends a token without the client having requested it, it is ignored.
//
// This RoundTripper also has a CancelRequest method important for correct timeout handling.
func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper {
if base == nil {
base = http.DefaultTransport
}
return &authTransport{
RoundTripper: base,
AuthConfig: authConfig,
alwaysSetBasicAuth: alwaysSetBasicAuth,
modReq: make(map[*http.Request]*http.Request),
}
}
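// Usage sketch (illustrative; the credentials are hypothetical): wrap a
// client's transport so v1 requests carry Basic Auth or a cached
// X-Docker-Token value, as described in the comment above.
//
//	ac := &types.AuthConfig{Username: "user", Password: "secret"}
//	httpClient := &http.Client{
//		Transport: AuthTransport(nil, ac, false), // nil base falls back to http.DefaultTransport
//	}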
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
return r2
}
// RoundTrip changes an HTTP request's headers to add the necessary
// authentication-related headers
func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
// Authorization should not be set on 302 redirect for untrusted locations.
// This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests.
// As the authorization logic is currently implemented in RoundTrip,
// a 302 redirect is detected by looking at the Referer header, which the Go http package adds on redirect.
// This is safe as Docker doesn't set Referer in other scenarios.
if orig.Header.Get("Referer") != "" && !trustedLocation(orig) {
return tr.RoundTripper.RoundTrip(orig)
}
req := cloneRequest(orig)
tr.mu.Lock()
tr.modReq[orig] = req
tr.mu.Unlock()
if tr.alwaysSetBasicAuth {
if tr.AuthConfig == nil {
return nil, errors.New("unexpected error: empty auth config")
}
req.SetBasicAuth(tr.Username, tr.Password)
return tr.RoundTripper.RoundTrip(req)
}
// Don't override
if req.Header.Get("Authorization") == "" {
if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 {
req.SetBasicAuth(tr.Username, tr.Password)
} else if len(tr.token) > 0 {
req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ","))
}
}
resp, err := tr.RoundTripper.RoundTrip(req)
if err != nil {
tr.mu.Lock()
delete(tr.modReq, orig)
tr.mu.Unlock()
return nil, err
}
if len(resp.Header["X-Docker-Token"]) > 0 {
tr.token = resp.Header["X-Docker-Token"]
}
resp.Body = &ioutils.OnEOFReader{
Rc: resp.Body,
Fn: func() {
tr.mu.Lock()
delete(tr.modReq, orig)
tr.mu.Unlock()
},
}
return resp, nil
}
// CancelRequest cancels an in-flight request by closing its connection.
func (tr *authTransport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := tr.RoundTripper.(canceler); ok {
tr.mu.Lock()
modReq := tr.modReq[req]
delete(tr.modReq, req)
tr.mu.Unlock()
cr.CancelRequest(modReq)
}
}
func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error {
var alwaysSetBasicAuth bool
// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
// alongside all our requests.
if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" {
info, err := endpoint.Ping()
if err != nil {
return err
}
if info.Standalone && authConfig != nil {
logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String())
alwaysSetBasicAuth = true
}
}
// Annotate the transport unconditionally so that v2 can
// properly fall back to v1 when an image is not found.
client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth)
jar, err := cookiejar.New(nil)
if err != nil {
return errors.New("cookiejar.New is not supposed to return an error")
}
client.Jar = jar
return nil
}
func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session {
return &Session{
authConfig: authConfig,
client: client,
indexEndpoint: endpoint,
id: stringid.GenerateRandomID(),
}
}
// NewSession creates a new session
// TODO(tiborvass): remove authConfig param once registry client v2 is vendored
func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) {
if err := authorizeClient(client, authConfig, endpoint); err != nil {
return nil, err
}
return newSession(client, authConfig, endpoint), nil
}
// ID returns this registry session's ID.
func (r *Session) ID() string {
return r.id
}
// GetRemoteHistory retrieves the history of a given image from the registry.
// It returns the IDs of the image's ancestry (including the requested image itself).
func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) {
res, err := r.client.Get(registry + "images/" + imgID + "/ancestry")
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
if res.StatusCode == http.StatusUnauthorized {
return nil, errcode.ErrorCodeUnauthorized.WithArgs()
}
return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
}
var history []string
if err := json.NewDecoder(res.Body).Decode(&history); err != nil {
return nil, fmt.Errorf("Error while reading the http response: %v", err)
}
logrus.Debugf("Ancestry: %v", history)
return history, nil
}
// LookupRemoteImage checks if an image exists in the registry
func (r *Session) LookupRemoteImage(imgID, registry string) error {
res, err := r.client.Get(registry + "images/" + imgID + "/json")
if err != nil {
return err
}
res.Body.Close()
if res.StatusCode != http.StatusOK {
return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
}
return nil
}
// GetRemoteImageJSON retrieves an image's JSON metadata from the registry.
func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) {
res, err := r.client.Get(registry + "images/" + imgID + "/json")
if err != nil {
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
}
// if the size header is not present, then set it to '-1'
imageSize := int64(-1)
if hdr := res.Header.Get("X-Docker-Size"); hdr != "" {
imageSize, err = strconv.ParseInt(hdr, 10, 64)
if err != nil {
return nil, -1, err
}
}
jsonString, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString)
}
return jsonString, imageSize, nil
}
// GetRemoteImageLayer retrieves an image layer from the registry
func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) {
var (
statusCode = 0
res *http.Response
err error
imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID)
)
req, err := http.NewRequest(http.MethodGet, imageURL, nil)
if err != nil {
return nil, fmt.Errorf("Error while getting from the server: %v", err)
}
res, err = r.client.Do(req)
if err != nil {
logrus.Debugf("Error contacting registry %s: %v", registry, err)
// the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515
if res != nil {
if res.Body != nil {
res.Body.Close()
}
statusCode = res.StatusCode
}
return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
statusCode, imgID)
}
if res.StatusCode != http.StatusOK {
res.Body.Close()
return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
res.StatusCode, imgID)
}
if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
logrus.Debug("server supports resume")
return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
}
logrus.Debug("server doesn't support resume")
return res.Body, nil
}
// GetRemoteTag retrieves the tag named in the askedTag argument from the given
// repository. It queries each of the registries supplied in the registries
// argument, and returns data from the first one that answers the query
// successfully.
func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) {
repository := reference.Path(repositoryRef)
if strings.Count(repository, "/") == 0 {
// This will be removed once the registry supports auto-resolution on
// the "library" namespace
repository = "library/" + repository
}
for _, host := range registries {
endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag)
res, err := r.client.Get(endpoint)
if err != nil {
return "", err
}
logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
defer res.Body.Close()
if res.StatusCode == 404 {
return "", ErrRepoNotFound
}
if res.StatusCode != http.StatusOK {
continue
}
var tagID string
if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil {
return "", err
}
return tagID, nil
}
return "", fmt.Errorf("Could not reach any registry endpoint")
}
// GetRemoteTags retrieves all tags from the given repository. It queries each
// of the registries supplied in the registries argument, and returns data from
// the first one that answers the query successfully. It returns a map with
// tag names as the keys and image IDs as the values.
func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) {
repository := reference.Path(repositoryRef)
if strings.Count(repository, "/") == 0 {
// This will be removed once the registry supports auto-resolution on
// the "library" namespace
repository = "library/" + repository
}
for _, host := range registries {
endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
res, err := r.client.Get(endpoint)
if err != nil {
return nil, err
}
logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
defer res.Body.Close()
if res.StatusCode == 404 {
return nil, ErrRepoNotFound
}
if res.StatusCode != http.StatusOK {
continue
}
result := make(map[string]string)
if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
return nil, err
}
return result, nil
}
return nil, fmt.Errorf("Could not reach any registry endpoint")
}
func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
var endpoints []string
parsedURL, err := url.Parse(indexEp)
if err != nil {
return nil, err
}
var urlScheme = parsedURL.Scheme
// The registry's URL scheme has to match the Index's
for _, ep := range headers {
epList := strings.Split(ep, ",")
for _, epListElement := range epList {
endpoints = append(
endpoints,
fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
}
}
return endpoints, nil
}
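// Example (illustrative, not part of the vendored source): the index URL's
// scheme is applied to every endpoint advertised in X-Docker-Endpoints, and
// each header value may itself be a comma-separated list:
//
//	eps, _ := buildEndpointsList(
//		[]string{"reg1.example.com, reg2.example.com"},
//		"https://index.docker.io/v1/",
//	)
//	// eps -> [https://reg1.example.com/v1/ https://reg2.example.com/v1/]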
// GetRepositoryData returns lists of images and endpoints for the repository
func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) {
repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name))
logrus.Debugf("[registry] Calling GET %s", repositoryTarget)
req, err := http.NewRequest(http.MethodGet, repositoryTarget, nil)
if err != nil {
return nil, err
}
// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
req.Header.Set("X-Docker-Token", "true")
res, err := r.client.Do(req)
if err != nil {
// check if the error is because of i/o timeout
// and return a non-obtuse error message for users
// "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout"
// was a top search on the docker user forum
if isTimeout(err) {
return nil, fmt.Errorf("network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy", repositoryTarget)
}
return nil, fmt.Errorf("Error while pulling image: %v", err)
}
defer res.Body.Close()
if res.StatusCode == http.StatusUnauthorized {
return nil, errcode.ErrorCodeUnauthorized.WithArgs()
}
// TODO: Right now we're ignoring checksums in the response body.
// In the future, we need to use them to check image validity.
if res.StatusCode == 404 {
return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
} else if res.StatusCode != http.StatusOK {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
logrus.Debugf("Error reading response body: %s", err)
}
return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res)
}
var endpoints []string
if res.Header.Get("X-Docker-Endpoints") != "" {
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
if err != nil {
return nil, err
}
} else {
// Assume the endpoint is on the same host
endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host))
}
remoteChecksums := []*ImgData{}
if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil {
return nil, err
}
// Forge a better object from the retrieved data
imgsData := make(map[string]*ImgData, len(remoteChecksums))
for _, elem := range remoteChecksums {
imgsData[elem.ID] = elem
}
return &RepositoryData{
ImgList: imgsData,
Endpoints: endpoints,
}, nil
}
// PushImageChecksumRegistry uploads checksums for an image
func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error {
u := registry + "images/" + imgData.ID + "/checksum"
logrus.Debugf("[registry] Calling PUT %s", u)
req, err := http.NewRequest(http.MethodPut, u, nil)
if err != nil {
return err
}
req.Header.Set("X-Docker-Checksum", imgData.Checksum)
req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
res, err := r.client.Do(req)
if err != nil {
return fmt.Errorf("Failed to upload metadata: %v", err)
}
defer res.Body.Close()
if len(res.Cookies()) > 0 {
r.client.Jar.SetCookies(req.URL, res.Cookies())
}
if res.StatusCode != http.StatusOK {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
}
var jsonBody map[string]string
if err := json.Unmarshal(errBody, &jsonBody); err != nil {
errBody = []byte(err.Error())
} else if jsonBody["error"] == "Image already exists" {
return ErrAlreadyExists
}
return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody)
}
return nil
}
// PushImageJSONRegistry pushes JSON metadata for a local image to the registry
func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error {
u := registry + "images/" + imgData.ID + "/json"
logrus.Debugf("[registry] Calling PUT %s", u)
req, err := http.NewRequest(http.MethodPut, u, bytes.NewReader(jsonRaw))
if err != nil {
return err
}
req.Header.Add("Content-type", "application/json")
res, err := r.client.Do(req)
if err != nil {
return fmt.Errorf("Failed to upload metadata: %s", err)
}
defer res.Body.Close()
if res.StatusCode == http.StatusUnauthorized && strings.HasPrefix(registry, "http://") {
return newJSONError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
}
if res.StatusCode != http.StatusOK {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
}
var jsonBody map[string]string
if err := json.Unmarshal(errBody, &jsonBody); err != nil {
errBody = []byte(err.Error())
} else if jsonBody["error"] == "Image already exists" {
return ErrAlreadyExists
}
return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res)
}
return nil
}
// PushImageLayerRegistry sends the checksum of an image layer to the registry
func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
u := registry + "images/" + imgID + "/layer"
logrus.Debugf("[registry] Calling PUT %s", u)
tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0)
if err != nil {
return "", "", err
}
h := sha256.New()
h.Write(jsonRaw)
h.Write([]byte{'\n'})
checksumLayer := io.TeeReader(tarsumLayer, h)
req, err := http.NewRequest(http.MethodPut, u, checksumLayer)
if err != nil {
return "", "", err
}
req.Header.Add("Content-Type", "application/octet-stream")
req.ContentLength = -1
req.TransferEncoding = []string{"chunked"}
res, err := r.client.Do(req)
if err != nil {
return "", "", fmt.Errorf("Failed to upload layer: %v", err)
}
if rc, ok := layer.(io.Closer); ok {
if err := rc.Close(); err != nil {
return "", "", err
}
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
}
return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res)
}
checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil))
return tarsumLayer.Sum(jsonRaw), checksumPayload, nil
}
// PushRegistryTag pushes a tag on the registry.
// Remote has the format '<user>/<repo>'
func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error {
// "jsonify" the string
revision = "\"" + revision + "\""
path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag)
req, err := http.NewRequest(http.MethodPut, registry+path, strings.NewReader(revision))
if err != nil {
return err
}
req.Header.Add("Content-type", "application/json")
req.ContentLength = int64(len(revision))
res, err := r.client.Do(req)
if err != nil {
return err
}
res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res)
}
return nil
}
// PushImageJSONIndex uploads an image list to the repository
func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
cleanImgList := []*ImgData{}
if validate {
for _, elem := range imgList {
if elem.Checksum != "" {
cleanImgList = append(cleanImgList, elem)
}
}
} else {
cleanImgList = imgList
}
imgListJSON, err := json.Marshal(cleanImgList)
if err != nil {
return nil, err
}
var suffix string
if validate {
suffix = "images"
}
u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix)
logrus.Debugf("[registry] PUT %s", u)
logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
headers := map[string][]string{
"Content-type": {"application/json"},
// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
"X-Docker-Token": {"true"},
}
if validate {
headers["X-Docker-Endpoints"] = regs
}
// Redirect if necessary
var res *http.Response
for {
if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
return nil, err
}
if !shouldRedirect(res) {
break
}
res.Body.Close()
u = res.Header.Get("Location")
logrus.Debugf("Redirected to %s", u)
}
defer res.Body.Close()
if res.StatusCode == http.StatusUnauthorized {
return nil, errcode.ErrorCodeUnauthorized.WithArgs()
}
var tokens, endpoints []string
if !validate {
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
logrus.Debugf("Error reading response body: %s", err)
}
return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
}
tokens = res.Header["X-Docker-Token"]
logrus.Debugf("Auth token: %v", tokens)
if res.Header.Get("X-Docker-Endpoints") == "" {
return nil, fmt.Errorf("Index response didn't contain any endpoints")
}
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
if err != nil {
return nil, err
}
} else {
if res.StatusCode != http.StatusNoContent {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
logrus.Debugf("Error reading response body: %s", err)
}
return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
}
}
return &RepositoryData{
Endpoints: endpoints,
}, nil
}
func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
req, err := http.NewRequest(http.MethodPut, u, bytes.NewReader(body))
if err != nil {
return nil, err
}
req.ContentLength = int64(len(body))
for k, v := range headers {
req.Header[k] = v
}
response, err := r.client.Do(req)
if err != nil {
return nil, err
}
return response, nil
}
func shouldRedirect(response *http.Response) bool {
return response.StatusCode >= 300 && response.StatusCode < 400
}
// SearchRepositories performs a search against the remote repository
func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) {
if limit < 1 || limit > 100 {
return nil, errdefs.InvalidParameter(errors.Errorf("Limit %d is outside the range of [1, 100]", limit))
}
logrus.Debugf("Index server: %s", r.indexEndpoint)
u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit))
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil {
return nil, errors.Wrap(errdefs.InvalidParameter(err), "Error building request")
}
// Have the AuthTransport send authentication, when logged in.
req.Header.Set("X-Docker-Token", "true")
res, err := r.client.Do(req)
if err != nil {
return nil, errdefs.System(err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
}
result := new(registrytypes.SearchResults)
return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results")
}
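// Request sketch (illustrative): a search for "redis" limited to 10 results
// issues a single GET against the v1 index, roughly:
//
//	results, err := r.SearchRepositories("redis", 10)
//	// -> GET {indexEndpoint}search?q=redis&n=10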
func isTimeout(err error) bool {
type timeout interface {
Timeout() bool
}
e := err
switch urlErr := err.(type) {
case *url.Error:
e = urlErr.Err
}
t, ok := e.(timeout)
return ok && t.Timeout()
}
func newJSONError(msg string, res *http.Response) error {
return &jsonmessage.JSONError{
Message: msg,
Code: res.StatusCode,
}
}

70
vendor/github.com/docker/docker/registry/types.go generated vendored Normal file

@@ -0,0 +1,70 @@
package registry // import "github.com/docker/docker/registry"
import (
"github.com/docker/distribution/reference"
registrytypes "github.com/docker/docker/api/types/registry"
)
// RepositoryData tracks the image list and the list of endpoints for a repository
type RepositoryData struct {
// ImgList is a list of images in the repository
ImgList map[string]*ImgData
// Endpoints is a list of endpoints returned in X-Docker-Endpoints
Endpoints []string
}
// ImgData is used to transfer image checksums to and from the registry
type ImgData struct {
// ID is an opaque string that identifies the image
ID string `json:"id"`
Checksum string `json:"checksum,omitempty"`
ChecksumPayload string `json:"-"`
Tag string `json:",omitempty"`
}
// PingResult contains the information returned when pinging a registry. It
// indicates the registry's version and whether the registry claims to be a
// standalone registry.
type PingResult struct {
// Version is the registry version supplied by the registry in an HTTP
// header
Version string `json:"version"`
// Standalone is set to true if the registry indicates it is a
// standalone registry in the X-Docker-Registry-Standalone
// header
Standalone bool `json:"standalone"`
}
// APIVersion is an integral representation of an API version (presently
// either 1 or 2)
type APIVersion int
func (av APIVersion) String() string {
return apiVersions[av]
}
// API Version identifiers.
const (
_ = iota
APIVersion1 APIVersion = iota
APIVersion2
)
var apiVersions = map[APIVersion]string{
APIVersion1: "v1",
APIVersion2: "v2",
}
// RepositoryInfo describes a repository
type RepositoryInfo struct {
Name reference.Named
// Index points to registry information
Index *registrytypes.IndexInfo
// Official indicates whether the repository is considered official.
// If the registry is official, and the normalized name does not
// contain a '/' (e.g. "foo"), then it is considered an official repo.
Official bool
// Class represents the class of the repository, such as "plugin"
// or "image".
Class string
}

25
vendor/github.com/docker/docker/rootless/rootless.go generated vendored Normal file

@@ -0,0 +1,25 @@
package rootless // import "github.com/docker/docker/rootless"
import (
"os"
"sync"
)
const (
// RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy
RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy"
)
var (
runningWithRootlessKit bool
runningWithRootlessKitOnce sync.Once
)
// RunningWithRootlessKit returns true if running under RootlessKit namespaces.
func RunningWithRootlessKit() bool {
runningWithRootlessKitOnce.Do(func() {
u := os.Getenv("ROOTLESSKIT_STATE_DIR")
runningWithRootlessKit = u != ""
})
return runningWithRootlessKit
}
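// Usage sketch (illustrative): the check is driven entirely by the
// ROOTLESSKIT_STATE_DIR environment variable and is memoized on first call.
//
//	if rootless.RunningWithRootlessKit() {
//		// adjust socket paths, port mappings, etc. for the rootless namespace
//	}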


@@ -0,0 +1,18 @@
// +build go1.7
package tlsconfig
import (
"crypto/x509"
"runtime"
)
// SystemCertPool returns a copy of the system cert pool. On Windows it
// returns an empty pool instead of an error if the system pool fails to load.
func SystemCertPool() (*x509.CertPool, error) {
certpool, err := x509.SystemCertPool()
if err != nil && runtime.GOOS == "windows" {
return x509.NewCertPool(), nil
}
return certpool, err
}


@@ -0,0 +1,13 @@
// +build !go1.7
package tlsconfig
import (
"crypto/x509"
)
// SystemCertPool returns a new empty cert pool, because
// accessing the system cert pool requires Go 1.7 or later.
func SystemCertPool() (*x509.CertPool, error) {
return x509.NewCertPool(), nil
}


@@ -0,0 +1,254 @@
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
// A Config may be reused; the tls package will also not modify it.
package tlsconfig
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"github.com/pkg/errors"
)
// Options represents the information needed to create client and server TLS configurations.
type Options struct {
CAFile string
// If either CertFile or KeyFile is empty, Client() will not load them
// preventing the client from authenticating to the server.
// However, Server() requires them and will error out if they are empty.
CertFile string
KeyFile string
// client-only option
InsecureSkipVerify bool
// server-only option
ClientAuth tls.ClientAuthType
// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
// creds will include exclusively the roots in that CA file. If no CA file is provided,
// the system pool will be used.
ExclusiveRootPools bool
MinVersion uint16
// If Passphrase is set, it will be used to decrypt a TLS private key
// if the key is encrypted
Passphrase string
}
// Extra (server-side) accepted CBC cipher suites - to be phased out in the future
var acceptedCBCCiphers = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
}
// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
// options struct but wants to use a commonly accepted set of TLS cipher suites, with
// known weak algorithms removed.
var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
// allTLSVersions lists all the TLS versions and is used by the code that validates
// a uint16 value as a TLS version.
var allTLSVersions = map[uint16]struct{}{
tls.VersionSSL30: {},
tls.VersionTLS10: {},
tls.VersionTLS11: {},
tls.VersionTLS12: {},
}
// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
tlsconfig := &tls.Config{
// Avoid fallback by default to SSL protocols < TLS1.2
MinVersion: tls.VersionTLS12,
PreferServerCipherSuites: true,
CipherSuites: DefaultServerAcceptedCiphers,
}
for _, op := range ops {
op(tlsconfig)
}
return tlsconfig
}
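// Usage sketch (illustrative): the variadic option funcs mutate the returned
// config in place, so callers can adjust individual fields, e.g.:
//
//	cfg := ServerDefault(func(c *tls.Config) {
//		c.ClientAuth = tls.RequireAndVerifyClientCert // hypothetical hardening
//	})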
// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
tlsconfig := &tls.Config{
// Prefer TLS1.2 as the client minimum
MinVersion: tls.VersionTLS12,
CipherSuites: clientCipherSuites,
}
for _, op := range ops {
op(tlsconfig)
}
return tlsconfig
}
// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
// If we should verify the server, we need to load a trusted ca
var (
certPool *x509.CertPool
err error
)
if exclusivePool {
certPool = x509.NewCertPool()
} else {
certPool, err = SystemCertPool()
if err != nil {
return nil, fmt.Errorf("failed to read system certificates: %v", err)
}
}
pem, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
}
if !certPool.AppendCertsFromPEM(pem) {
return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
}
return certPool, nil
}
// isValidMinVersion checks that the input value is a valid tls minimum version
func isValidMinVersion(version uint16) bool {
_, ok := allTLSVersions[version]
return ok
}
// adjustMinVersion sets the MinVersion on `config`, the input configuration.
// It assumes the current MinVersion on the `config` is the lowest allowed.
func adjustMinVersion(options Options, config *tls.Config) error {
if options.MinVersion > 0 {
if !isValidMinVersion(options.MinVersion) {
return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
}
if options.MinVersion < config.MinVersion {
return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
}
config.MinVersion = options.MinVersion
}
return nil
}
// IsErrEncryptedKey returns true if 'err' is an incorrect-password error
// from trying to decrypt a TLS private key
func IsErrEncryptedKey(err error) bool {
return errors.Cause(err) == x509.IncorrectPasswordError
}
// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
// If the private key is encrypted, 'passphrase' is used to decrypt the
// private key.
func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
// this section makes some small changes to code from notary/tuf/utils/x509.go
pemBlock, _ := pem.Decode(keyBytes)
if pemBlock == nil {
return nil, fmt.Errorf("no valid private key found")
}
var err error
if x509.IsEncryptedPEMBlock(pemBlock) {
keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
if err != nil {
return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
}
keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
}
return keyBytes, nil
}
// getCert returns a Certificate from the CertFile and KeyFile in 'options'.
// If the key is encrypted, the Passphrase in 'options' will be used to
// decrypt it.
func getCert(options Options) ([]tls.Certificate, error) {
if options.CertFile == "" && options.KeyFile == "" {
return nil, nil
}
errMessage := "Could not load X509 key pair"
cert, err := ioutil.ReadFile(options.CertFile)
if err != nil {
return nil, errors.Wrap(err, errMessage)
}
prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
if err != nil {
return nil, errors.Wrap(err, errMessage)
}
prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
if err != nil {
return nil, errors.Wrap(err, errMessage)
}
tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
if err != nil {
return nil, errors.Wrap(err, errMessage)
}
return []tls.Certificate{tlsCert}, nil
}
// Client returns a TLS configuration meant to be used by a client.
func Client(options Options) (*tls.Config, error) {
tlsConfig := ClientDefault()
tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
if !options.InsecureSkipVerify && options.CAFile != "" {
CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
if err != nil {
return nil, err
}
tlsConfig.RootCAs = CAs
}
tlsCerts, err := getCert(options)
if err != nil {
return nil, err
}
tlsConfig.Certificates = tlsCerts
if err := adjustMinVersion(options, tlsConfig); err != nil {
return nil, err
}
return tlsConfig, nil
}
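// Usage sketch (illustrative; all file paths are hypothetical): build a client
// config that trusts a custom CA and presents a client certificate.
//
//	cfg, err := Client(Options{
//		CAFile:   "/etc/docker/ca.pem",
//		CertFile: "/etc/docker/cert.pem",
//		KeyFile:  "/etc/docker/key.pem",
//	})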
// Server returns a TLS configuration meant to be used by a server.
func Server(options Options) (*tls.Config, error) {
tlsConfig := ServerDefault()
tlsConfig.ClientAuth = options.ClientAuth
tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
}
return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
}
tlsConfig.Certificates = []tls.Certificate{tlsCert}
if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
if err != nil {
return nil, err
}
tlsConfig.ClientCAs = CAs
}
if err := adjustMinVersion(options, tlsConfig); err != nil {
return nil, err
}
return tlsConfig, nil
}


@@ -0,0 +1,17 @@
// +build go1.5
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig
import (
"crypto/tls"
)
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}


@@ -0,0 +1,15 @@
// +build !go1.5
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
package tlsconfig
import (
"crypto/tls"
)
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

55
vendor/github.com/docker/go-metrics/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,55 @@
# Contributing
## Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions).
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

191
vendor/github.com/docker/go-metrics/LICENSE.code generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

425
vendor/github.com/docker/go-metrics/LICENSE.docs generated vendored Normal file

@@ -0,0 +1,425 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public licenses.
Notwithstanding, Creative Commons may elect to apply one of its public
licenses to material it publishes and in those instances will be
considered the "Licensor." Except for the limited purpose of indicating
that material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.

16
vendor/github.com/docker/go-metrics/NOTICE generated vendored Normal file
View File

@@ -0,0 +1,16 @@
Docker
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

91
vendor/github.com/docker/go-metrics/README.md generated vendored Normal file
View File

@@ -0,0 +1,91 @@
# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
## Best Practices
This package is meant to be used for collecting metrics in Docker projects.
It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).
The following are a few Docker specific rules that will help you name and work with metrics in your project.
1. Namespace and Subsystem
This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
```go
ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
"version": dockerversion.Version,
"commit": dockerversion.GitCommit,
})
```
In the example above we are creating metrics for the Docker engine's daemon package.
`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics.
A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
2. Declaring your Metrics
Try to keep all your metric declarations in one file.
This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
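For example, a project might keep all of its declarations together in a single `metrics.go` file; a minimal sketch (package and metric names here are illustrative) could look like:
```go
package daemon

import metrics "github.com/docker/go-metrics"

var (
	// ns holds the namespace, subsystem, and constant labels shared by
	// every metric declared in this package.
	ns = metrics.NewNamespace("engine", "daemon", nil)

	// containerActions times each container action, labeled by action type.
	containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
)

func init() {
	// Register the namespace once so all of its metrics are collected.
	metrics.Register(ns)
}
```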
3. Use labels instead of multiple metrics
Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action.
```go
containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
```
The last parameter is the label name or key.
When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
```go
containerActions.WithValues("create").UpdateSince(start)
```
4. Always use a unit
The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
For a timer, the standard unit is seconds and a counter's standard unit is a total.
For gauges you must provide the unit.
This package provides a standard set of units for use within the Docker projects.
```go
Nanoseconds Unit = "nanoseconds"
Seconds Unit = "seconds"
Bytes Unit = "bytes"
Total Unit = "total"
```
If you need a unit that is not defined in the package, please open a PR to add it, but first check whether one of the existing units will work for your metric, e.g. seconds or nanoseconds rather than adding milliseconds.
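For example, combining the namespace from rule 1 with the `Bytes` unit appends the unit to the fully qualified metric name (the gauge below is illustrative):
```go
// Exposed as "engine_daemon_memory_bytes".
memory := ns.NewGauge("memory", "The amount of memory used by the daemon", metrics.Bytes)
memory.Set(2048)
```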
## Docs
Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
## HTTP Metrics
To instrument an HTTP handler, you can wrap the code like this:
```go
namespace := metrics.NewNamespace("docker_distribution", "http", nil)
httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
metrics.Register(namespace)
instrumentedHandler := metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
```
Note: the `handler` label is attached to each metric automatically from the handler name passed to `NewDefaultHttpMetrics`.
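To expose the collected metrics, serve the instrumented handler alongside the handler returned by `metrics.Handler()`; a minimal sketch (assuming the standard `net/http` and `log` packages are imported, with illustrative paths and address) might be:
```go
http.Handle("/v2/", instrumentedHandler)
http.Handle("/metrics", metrics.Handler())
log.Fatal(http.ListenAndServe(":8080", nil))
```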
## Additional Metrics
Additional metrics are also defined here that are not available in the prometheus client.
If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
## Copyright and license
Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.

52
vendor/github.com/docker/go-metrics/counter.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
package metrics
import "github.com/prometheus/client_golang/prometheus"
// Counter is a metric that can only increment its current count.
type Counter interface {
// Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
//
// If len(vs) == 0, increments the counter by 1.
Inc(vs ...float64)
}
// LabeledCounter is a counter that must have labels populated before use.
type LabeledCounter interface {
WithValues(vs ...string) Counter
}
type labeledCounter struct {
pc *prometheus.CounterVec
}
func (lc *labeledCounter) WithValues(vs ...string) Counter {
return &counter{pc: lc.pc.WithLabelValues(vs...)}
}
func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
lc.pc.Describe(ch)
}
func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
lc.pc.Collect(ch)
}
type counter struct {
pc prometheus.Counter
}
func (c *counter) Inc(vs ...float64) {
if len(vs) == 0 {
c.pc.Inc()
return
}
c.pc.Add(sumFloat64(vs...))
}
func (c *counter) Describe(ch chan<- *prometheus.Desc) {
c.pc.Describe(ch)
}
func (c *counter) Collect(ch chan<- prometheus.Metric) {
c.pc.Collect(ch)
}

3
vendor/github.com/docker/go-metrics/docs.go generated vendored Normal file
View File

@@ -0,0 +1,3 @@
// Package metrics is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
package metrics

72
vendor/github.com/docker/go-metrics/gauge.go generated vendored Normal file
View File

@@ -0,0 +1,72 @@
package metrics
import "github.com/prometheus/client_golang/prometheus"
// Gauge is a metric that allows incrementing and decrementing a value.
type Gauge interface {
// Inc adds Sum(vs) to the gauge's current value, or increments it by 1 if no values are provided.
Inc(...float64)
// Dec subtracts Sum(vs) from the gauge's current value, or decrements it by 1 if no values are provided.
Dec(...float64)
// Add adds the provided value to the gauge's current value
Add(float64)
// Set replaces the gauge's current value with the provided value
Set(float64)
}
// LabeledGauge describes a gauge that must have label values populated before use.
type LabeledGauge interface {
WithValues(labels ...string) Gauge
}
type labeledGauge struct {
pg *prometheus.GaugeVec
}
func (lg *labeledGauge) WithValues(labels ...string) Gauge {
return &gauge{pg: lg.pg.WithLabelValues(labels...)}
}
func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
lg.pg.Describe(c)
}
func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
lg.pg.Collect(c)
}
type gauge struct {
pg prometheus.Gauge
}
func (g *gauge) Inc(vs ...float64) {
if len(vs) == 0 {
g.pg.Inc()
return
}
g.Add(sumFloat64(vs...))
}
func (g *gauge) Dec(vs ...float64) {
if len(vs) == 0 {
g.pg.Dec()
return
}
g.Add(-sumFloat64(vs...))
}
func (g *gauge) Add(v float64) {
g.pg.Add(v)
}
func (g *gauge) Set(v float64) {
g.pg.Set(v)
}
func (g *gauge) Describe(c chan<- *prometheus.Desc) {
g.pg.Describe(c)
}
func (g *gauge) Collect(c chan<- prometheus.Metric) {
g.pg.Collect(c)
}

74
vendor/github.com/docker/go-metrics/handler.go generated vendored Normal file
View File

@@ -0,0 +1,74 @@
package metrics
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// HTTPHandlerOpts describes a set of configurable options for HTTP metrics.
type HTTPHandlerOpts struct {
DurationBuckets []float64
RequestSizeBuckets []float64
ResponseSizeBuckets []float64
}
// Handler types used by InstrumentHandler to select the matching promhttp
// middleware for each HTTPMetric.
const (
InstrumentHandlerResponseSize = iota
InstrumentHandlerRequestSize
InstrumentHandlerDuration
InstrumentHandlerCounter
InstrumentHandlerInFlight
)
type HTTPMetric struct {
prometheus.Collector
handlerType int
}
var (
defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G
defaultResponseSizeBuckets = defaultRequestSizeBuckets
)
// Handler returns the global http.Handler that provides the prometheus
// metrics format on GET requests. This handler is no longer instrumented.
func Handler() http.Handler {
return promhttp.Handler()
}
func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
}
func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
var handler http.Handler
handler = http.HandlerFunc(handlerFunc)
for _, metric := range metrics {
switch metric.handlerType {
case InstrumentHandlerResponseSize:
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
}
case InstrumentHandlerRequestSize:
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
}
case InstrumentHandlerDuration:
if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
handler = promhttp.InstrumentHandlerDuration(collector, handler)
}
case InstrumentHandlerCounter:
if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
handler = promhttp.InstrumentHandlerCounter(collector, handler)
}
case InstrumentHandlerInFlight:
if collector, ok := metric.Collector.(prometheus.Gauge); ok {
handler = promhttp.InstrumentHandlerInFlight(collector, handler)
}
}
}
return handler.ServeHTTP
}

10
vendor/github.com/docker/go-metrics/helpers.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package metrics
func sumFloat64(vs ...float64) float64 {
var sum float64
for _, v := range vs {
sum += v
}
return sum
}

315
vendor/github.com/docker/go-metrics/namespace.go generated vendored Normal file
View File

@@ -0,0 +1,315 @@
package metrics
import (
"fmt"
"sync"
"github.com/prometheus/client_golang/prometheus"
)
type Labels map[string]string
// NewNamespace returns a Namespace that is responsible for managing a collection of
// metrics for a particular namespace and subsystem.
//
// labels allows constant labels to be added to all metrics created in this namespace;
// they are commonly used for data like the application version and git commit.
func NewNamespace(name, subsystem string, labels Labels) *Namespace {
if labels == nil {
labels = make(map[string]string)
}
return &Namespace{
name: name,
subsystem: subsystem,
labels: labels,
}
}
// Namespace describes a set of metrics that share a namespace and subsystem.
type Namespace struct {
name string
subsystem string
labels Labels
mu sync.Mutex
metrics []prometheus.Collector
}
// WithConstLabels returns a namespace with the provided set of labels merged
// with the existing constant labels on the namespace.
//
// Only metrics created with the returned namespace will get the new constant
// labels. The returned namespace must be registered separately.
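// A usage sketch (label values are illustrative):
//
//	versioned := ns.WithConstLabels(Labels{"version": "1.2.3"})
//	Register(versioned)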
func (n *Namespace) WithConstLabels(labels Labels) *Namespace {
n.mu.Lock()
ns := &Namespace{
name: n.name,
subsystem: n.subsystem,
labels: mergeLabels(n.labels, labels),
}
n.mu.Unlock()
return ns
}
func (n *Namespace) NewCounter(name, help string) Counter {
c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))}
n.Add(c)
return c
}
func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter {
c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)}
n.Add(c)
return c
}
func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts {
return prometheus.CounterOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: makeName(name, Total),
Help: help,
ConstLabels: prometheus.Labels(n.labels),
}
}
func (n *Namespace) NewTimer(name, help string) Timer {
t := &timer{
m: prometheus.NewHistogram(n.newTimerOpts(name, help)),
}
n.Add(t)
return t
}
func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer {
t := &labeledTimer{
m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels),
}
n.Add(t)
return t
}
func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts {
return prometheus.HistogramOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: makeName(name, Seconds),
Help: help,
ConstLabels: prometheus.Labels(n.labels),
}
}
func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge {
g := &gauge{
pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)),
}
n.Add(g)
return g
}
func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge {
g := &labeledGauge{
pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels),
}
n.Add(g)
return g
}
func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts {
return prometheus.GaugeOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: makeName(name, unit),
Help: help,
ConstLabels: prometheus.Labels(n.labels),
}
}
func (n *Namespace) Describe(ch chan<- *prometheus.Desc) {
n.mu.Lock()
defer n.mu.Unlock()
for _, metric := range n.metrics {
metric.Describe(ch)
}
}
func (n *Namespace) Collect(ch chan<- prometheus.Metric) {
n.mu.Lock()
defer n.mu.Unlock()
for _, metric := range n.metrics {
metric.Collect(ch)
}
}
func (n *Namespace) Add(collector prometheus.Collector) {
n.mu.Lock()
n.metrics = append(n.metrics, collector)
n.mu.Unlock()
}
func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc {
name = makeName(name, unit)
namespace := n.name
if n.subsystem != "" {
namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem)
}
name = fmt.Sprintf("%s_%s", namespace, name)
return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels))
}
// mergeLabels merges two or more labels objects into a single map, favoring
// the later labels.
func mergeLabels(lbs ...Labels) Labels {
merged := make(Labels)
for _, target := range lbs {
for k, v := range target {
merged[k] = v
}
}
return merged
}
func makeName(name string, unit Unit) string {
if unit == "" {
return name
}
return fmt.Sprintf("%s_%s", name, unit)
}
func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
DurationBuckets: defaultDurationBuckets,
RequestSizeBuckets: defaultRequestSizeBuckets,
ResponseSizeBuckets: defaultResponseSizeBuckets,
})
}
func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
DurationBuckets: durationBuckets,
RequestSizeBuckets: requestSizeBuckets,
ResponseSizeBuckets: responseSizeBuckets,
})
}
func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
var httpMetrics []*HTTPMetric
inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
requestTotalMetric := n.NewRequestTotalMetric(handlerName)
requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
return httpMetrics
}
func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
// Copy the namespace's constant labels so the shared map is not mutated.
labels := mergeLabels(n.labels, Labels{"handler": handlerName})
metric := prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "in_flight_requests",
Help: "The in-flight HTTP requests",
ConstLabels: prometheus.Labels(labels),
})
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerInFlight,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
labels := mergeLabels(n.labels, Labels{"handler": handlerName}) // copy, don't mutate n.labels
metric := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "requests_total",
Help: "Total number of HTTP requests made.",
ConstLabels: prometheus.Labels(labels),
},
[]string{"code", "method"},
)
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerCounter,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
if len(buckets) == 0 {
panic("DurationBuckets must be provided")
}
labels := mergeLabels(n.labels, Labels{"handler": handlerName}) // copy, don't mutate n.labels
opts := prometheus.HistogramOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "request_duration_seconds",
Help: "The HTTP request latencies in seconds.",
Buckets: buckets,
ConstLabels: prometheus.Labels(labels),
}
metric := prometheus.NewHistogramVec(opts, []string{"method"})
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerDuration,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
if len(buckets) == 0 {
panic("RequestSizeBuckets must be provided")
}
labels := mergeLabels(n.labels, Labels{"handler": handlerName}) // copy, don't mutate n.labels
opts := prometheus.HistogramOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "request_size_bytes",
Help: "The HTTP request sizes in bytes.",
Buckets: buckets,
ConstLabels: prometheus.Labels(labels),
}
metric := prometheus.NewHistogramVec(opts, []string{})
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerRequestSize,
}
n.Add(httpMetric)
return httpMetric
}
func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
if len(buckets) == 0 {
panic("ResponseSizeBuckets must be provided")
}
labels := mergeLabels(n.labels, Labels{"handler": handlerName}) // copy, don't mutate n.labels
opts := prometheus.HistogramOpts{
Namespace: n.name,
Subsystem: n.subsystem,
Name: "response_size_bytes",
Help: "The HTTP response sizes in bytes.",
Buckets: buckets,
ConstLabels: prometheus.Labels(labels),
}
metric := prometheus.NewHistogramVec(opts, []string{})
httpMetric := &HTTPMetric{
Collector: metric,
handlerType: InstrumentHandlerResponseSize,
}
n.Add(httpMetric)
return httpMetric
}

15
vendor/github.com/docker/go-metrics/register.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
package metrics
import "github.com/prometheus/client_golang/prometheus"
// Register adds all the metrics in the provided namespace to the global
// metrics registry
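// A usage sketch (namespace values are illustrative):
//
//	ns := NewNamespace("engine", "daemon", nil)
//	Register(ns)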
func Register(n *Namespace) {
prometheus.MustRegister(n)
}
// Deregister removes all the metrics in the provided namespace from the
// global metrics registry
func Deregister(n *Namespace) {
prometheus.Unregister(n)
}

85
vendor/github.com/docker/go-metrics/timer.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
package metrics
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
// StartTimer begins a timer observation at the callsite. When the target
// operation is completed, the caller should call the returned done func().
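// A usage sketch (myTimer is illustrative):
//
//	done := StartTimer(myTimer)
//	defer done()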
func StartTimer(timer Timer) (done func()) {
start := time.Now()
return func() {
timer.Update(time.Since(start))
}
}
// Timer is a metric that allows collecting the duration of an action in seconds
type Timer interface {
// Update records an observation of the given duration and converts it to
// the timer's target units.
Update(duration time.Duration)
// UpdateSince will add the duration from the provided starting time to the
// timer's summary with the precision that was used in the creation of the timer.
UpdateSince(time.Time)
}
// LabeledTimer is a timer that must have label values populated before use.
type LabeledTimer interface {
WithValues(labels ...string) *labeledTimerObserver
}
type labeledTimer struct {
m *prometheus.HistogramVec
}
type labeledTimerObserver struct {
m prometheus.Observer
}
func (lbo *labeledTimerObserver) Update(duration time.Duration) {
lbo.m.Observe(duration.Seconds())
}
func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
lbo.m.Observe(time.Since(since).Seconds())
}
func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
}
func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
lt.m.Describe(c)
}
func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
lt.m.Collect(c)
}
type timer struct {
m prometheus.Observer
}
func (t *timer) Update(duration time.Duration) {
t.m.Observe(duration.Seconds())
}
func (t *timer) UpdateSince(since time.Time) {
t.m.Observe(time.Since(since).Seconds())
}
func (t *timer) Describe(c chan<- *prometheus.Desc) {
c <- t.m.(prometheus.Metric).Desc()
}
func (t *timer) Collect(c chan<- prometheus.Metric) {
// Are there any observers that don't implement Collector? It is really
// unclear what the point of the upstream change was, but we'll let this
// panic if we get an observer that doesn't implement collector. In this
// case, we should almost always see metricVec objects, so this should
// never panic.
t.m.(prometheus.Collector).Collect(c)
}

12
vendor/github.com/docker/go-metrics/unit.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
package metrics
// Unit represents the type or precision of a metric that is appended to
// the metric's fully qualified name
type Unit string
const (
Nanoseconds Unit = "nanoseconds"
Seconds Unit = "seconds"
Bytes Unit = "bytes"
Total Unit = "total"
)

27
vendor/github.com/docker/go/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

1168
vendor/github.com/docker/go/canonical/json/decode.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1250
vendor/github.com/docker/go/canonical/json/encode.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

143
vendor/github.com/docker/go/canonical/json/fold.go generated vendored Normal file
View File

@@ -0,0 +1,143 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
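// For example (illustrative): foldFunc([]byte("id")) returns
// simpleLetterEqualFold, and the returned function reports []byte("id")
// and []byte("ID") as equal.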
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}

141
vendor/github.com/docker/go/canonical/json/indent.go generated vendored Normal file
View File

@@ -0,0 +1,141 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}

623
vendor/github.com/docker/go/canonical/json/scanner.go generated vendored Normal file
View File

@@ -0,0 +1,623 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, c)
if v >= scanEndObject {
switch v {
// probe the scanner with a space to determine whether we will
// get scanEnd on the next character. Otherwise, if the next character
// is not a space, scanEndTop allocates a needless error.
case scanEndObject, scanEndArray:
if scan.step(scan, ' ') == scanEnd {
return data[:i+1], data[i+1:], nil
}
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, byte) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
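// For illustration, a sketch of how the number states chain together while
// scanning the literal `-12.3e+4` (showing the state entered after each byte
// is consumed):
//
//	'-' -> stateNeg
//	'1' -> state1
//	'2' -> state1
//	'.' -> stateDot
//	'3' -> stateDot0
//	'e' -> stateE
//	'+' -> stateESign
//	'4' -> stateE0
//
// Whatever byte follows is handed to stateEndValue, since a number has no
// closing delimiter of its own.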
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
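// For illustration (a sketch of the cases above):
//
//	quoteChar('\'') == `'\''`  // special case
//	quoteChar('"')  == `'"'`   // special case
//	quoteChar('\n') == `'\n'`  // via strconv.Quote
//	quoteChar('a')  == `'a'`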
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c byte) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
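// Sketch of the round-trip: after s.undo(op), the very next s.step call
// ignores its input byte, returns op, and restores the saved state; the
// caller is then expected to re-deliver the byte it had read too far.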

487
vendor/github.com/docker/go/canonical/json/stream.go generated vendored Normal file

@@ -0,0 +1,487 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
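// A minimal usage sketch (assuming this fork is imported as
// json "github.com/docker/go/canonical/json", and that the caller imports
// strings, io, fmt, and log):
//
//	dec := json.NewDecoder(strings.NewReader(`{"n": 1} {"n": 2}`))
//	dec.UseNumber() // numbers arrive as json.Number instead of float64
//	for {
//		var v map[string]interface{}
//		if err := dec.Decode(&v); err == io.EOF {
//			break
//		} else if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(v["n"])
//	}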
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, c)
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
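// Note the one-byte delay compensated for above: a bare number such as `10`
// has no closing delimiter, so the scanner cannot emit scanEnd until it sees
// the byte after the final digit. Rather than block waiting for that byte
// after a closing `}` or `]`, or at EOF, readValue invents a space and asks
// the scanner whether that would end the value.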
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
w io.Writer
err error
canonical bool
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w}
}
// Canonical causes the encoder to switch to Canonical JSON mode.
// Read more at: http://wiki.laptop.org/go/Canonical_JSON
func (enc *Encoder) Canonical() { enc.canonical = true }
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState(enc.canonical)
err := e.marshal(v)
if err != nil {
return err
}
if !enc.canonical {
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
}
if _, err = enc.w.Write(e.Bytes()); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
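// A minimal usage sketch (same import assumptions as the Decoder example
// above, plus bytes). Canonical JSON, per the OLPC spec linked above, is
// deterministic, which is what makes it usable for signed payloads; note
// that canonical mode also skips the trailing newline written in the
// non-canonical branch:
//
//	var buf bytes.Buffer
//	enc := json.NewEncoder(&buf)
//	enc.Canonical()
//	if err := enc.Encode(map[string]int{"b": 2, "a": 1}); err != nil {
//		log.Fatal(err)
//	}
//	// buf.String() is expected to be `{"a":1,"b":2}`: keys sorted, no newline.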
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
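// A minimal usage sketch: RawMessage lets an envelope be decoded first and
// the payload only once its type is known:
//
//	var env struct {
//		Type string
//		Data json.RawMessage
//	}
//	blob := []byte(`{"Type":"point","Data":{"x":1,"y":2}}`)
//	if err := json.Unmarshal(blob, &env); err != nil {
//		log.Fatal(err)
//	}
//	if env.Type == "point" {
//		var p struct{ X, Y int }
//		if err := json.Unmarshal(env.Data, &p); err != nil { // decoded only now
//			log.Fatal(err)
//		}
//	}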
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance tokenState from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}
func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
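// A minimal usage sketch combining Token, More, and Decode to stream the
// elements of a large array one at a time:
//
//	dec := json.NewDecoder(strings.NewReader(`[{"a":1},{"a":2}]`))
//	if _, err := dec.Token(); err != nil { // consume the opening [
//		log.Fatal(err)
//	}
//	for dec.More() {
//		var m map[string]int
//		if err := dec.Decode(&m); err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(m)
//	}
//	if _, err := dec.Token(); err != nil { // consume the closing ]
//		log.Fatal(err)
//	}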
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
/*
TODO
// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}
*/

44
vendor/github.com/docker/go/canonical/json/tags.go generated vendored Normal file

@@ -0,0 +1,44 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
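// For illustration, parsing the tag value `name,omitempty` (a sketch; these
// helpers are unexported, so this only runs inside the package, e.g. in a
// test):
//
//	name, opts := parseTag("name,omitempty")
//	// name == "name"
//	// opts.Contains("omitempty") == true
//	// opts.Contains("string")    == false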