Vendor after merging mtrmac/image:ParseNormalizedNamed

… and use the master branch of docker/distribution which provides
docker/distribution/reference.ParseNormalizedNamed.
Miloslav Trmač 2017-01-19 22:37:14 +01:00
parent 8602471486
commit 2f8cc39a1a
26 changed files with 1079 additions and 557 deletions


@ -19,7 +19,7 @@ github.com/gorilla/mux e444e69cbd
github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
golang.org/x/net master https://github.com/golang/net.git golang.org/x/net master https://github.com/golang/net.git
# end docker deps # end docker deps
github.com/docker/distribution 07f32ac1831ed0fc71960b7da5d6bb83cb6881b5 github.com/docker/distribution master
github.com/docker/libtrust master github.com/docker/libtrust master
github.com/opencontainers/runc master github.com/opencontainers/runc master
github.com/opencontainers/image-spec v1.0.0-rc3 github.com/opencontainers/image-spec v1.0.0-rc3


@ -56,7 +56,7 @@ type Canonical interface {
// returned. // returned.
// If an error was encountered it is returned, along with a nil Reference. // If an error was encountered it is returned, along with a nil Reference.
func ParseNamed(s string) (Named, error) { func ParseNamed(s string) (Named, error) {
named, err := distreference.ParseNamed(s) named, err := distreference.ParseNormalizedNamed(s)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", s) return nil, errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", s)
} }
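
As context for this wrapper change, here is a minimal sketch (not part of the commit) of how the two vendored parsers behave after the bump, assuming the docker/distribution/reference API shown elsewhere in this diff:

```go
package main

import (
	"fmt"

	distreference "github.com/docker/distribution/reference"
)

func main() {
	// ParseNormalizedNamed accepts the familiar Docker UI form and expands
	// it to a fully qualified reference.
	named, err := distreference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.Name()) // docker.io/library/ubuntu

	// The strict ParseNamed now requires the canonical form and rejects
	// the short form with ErrNameNotCanonical, hence the wrapper switch.
	if _, err := distreference.ParseNamed("ubuntu"); err != nil {
		fmt.Println(err) // repository name must be canonical
	}
}
```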


@ -8,8 +8,8 @@ import (
"time" "time"
"github.com/docker/distribution/context" "github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
) )
var ( var (
@ -192,6 +192,18 @@ type BlobCreateOption interface {
Apply(interface{}) error Apply(interface{}) error
} }
// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
// Stat allows to pass precalculated descriptor to link and return.
// Blob access check will be skipped if set.
Stat *Descriptor
}
}
// BlobWriter provides a handle for inserting data into a blob store. // BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and // Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be // BlobWriteService.Resume. If supported by the store, a writer can be
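
The new CreateOptions struct is what BlobCreateOption.Apply is expected to mutate. A hypothetical option (a sketch, not code from this commit; the type and field names other than CreateOptions are invented for illustration) might look like this:

```go
package blobopts

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
)

// mountFromOption is a hypothetical BlobCreateOption asking the blob store to
// cross-repository mount a blob instead of accepting a fresh upload.
type mountFromOption struct {
	from reference.Canonical
	stat *distribution.Descriptor // optional precalculated descriptor
}

// Apply fills the Mount fields that this vendor bump adds to CreateOptions.
func (o mountFromOption) Apply(v interface{}) error {
	opts, ok := v.(*distribution.CreateOptions)
	if !ok {
		return fmt.Errorf("unsupported options type %T", v)
	}
	opts.Mount.ShouldMount = true
	opts.Mount.From = o.from
	opts.Mount.Stat = o.stat
	return nil
}
```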


@ -103,20 +103,22 @@ func GetRequestID(ctx Context) string {
// WithResponseWriter returns a new context and response writer that makes // WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context. // interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
irw := instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
}
if closeNotifier, ok := w.(http.CloseNotifier); ok { if closeNotifier, ok := w.(http.CloseNotifier); ok {
irwCN := &instrumentedResponseWriterCN{ irwCN := &instrumentedResponseWriterCN{
instrumentedResponseWriter: irw, instrumentedResponseWriter: instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
},
CloseNotifier: closeNotifier, CloseNotifier: closeNotifier,
} }
return irwCN, irwCN return irwCN, irwCN
} }
irw := instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
}
return &irw, &irw return &irw, &irw
} }


@ -1,139 +0,0 @@
package digest
import (
"fmt"
"hash"
"io"
"regexp"
"strings"
)
const (
// DigestSha256EmptyTar is the canonical sha256 digest of empty data
DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
// Digest allows simple protection of hex formatted digest strings, prefixed
// by their algorithm. Strings of type Digest have some guarantee of being in
// the correct format and it provides quick access to the components of a
// digest string.
//
// The following is an example of the contents of Digest types:
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// This allows to abstract the digest behind this type and work only in those
// terms.
type Digest string
// NewDigest returns a Digest from alg and a hash.Hash object.
func NewDigest(alg Algorithm, h hash.Hash) Digest {
return NewDigestFromBytes(alg, h.Sum(nil))
}
// NewDigestFromBytes returns a new digest from the byte contents of p.
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
// functions. This is also useful for rebuilding digests from binary
// serializations.
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
return Digest(fmt.Sprintf("%s:%x", alg, p))
}
// NewDigestFromHex returns a Digest from alg and a the hex encoded digest.
func NewDigestFromHex(alg, hex string) Digest {
return Digest(fmt.Sprintf("%s:%s", alg, hex))
}
// DigestRegexp matches valid digest types.
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
var (
// ErrDigestInvalidFormat returned when digest format invalid.
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
// ErrDigestInvalidLength returned when digest has invalid length.
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
// ErrDigestUnsupported returned when the digest algorithm is unsupported.
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
)
// ParseDigest parses s and returns the validated digest object. An error will
// be returned if the format is invalid.
func ParseDigest(s string) (Digest, error) {
d := Digest(s)
return d, d.Validate()
}
// FromReader returns the most valid digest for the underlying content using
// the canonical digest algorithm.
func FromReader(rd io.Reader) (Digest, error) {
return Canonical.FromReader(rd)
}
// FromBytes digests the input and returns a Digest.
func FromBytes(p []byte) Digest {
return Canonical.FromBytes(p)
}
// Validate checks that the contents of d is a valid digest, returning an
// error if not.
func (d Digest) Validate() error {
s := string(d)
if !DigestRegexpAnchored.MatchString(s) {
return ErrDigestInvalidFormat
}
i := strings.Index(s, ":")
if i < 0 {
return ErrDigestInvalidFormat
}
// case: "sha256:" with no hex.
if i+1 == len(s) {
return ErrDigestInvalidFormat
}
switch algorithm := Algorithm(s[:i]); algorithm {
case SHA256, SHA384, SHA512:
if algorithm.Size()*2 != len(s[i+1:]) {
return ErrDigestInvalidLength
}
break
default:
return ErrDigestUnsupported
}
return nil
}
// Algorithm returns the algorithm portion of the digest. This will panic if
// the underlying digest is not in a valid format.
func (d Digest) Algorithm() Algorithm {
return Algorithm(d[:d.sepIndex()])
}
// Hex returns the hex digest portion of the digest. This will panic if the
// underlying digest is not in a valid format.
func (d Digest) Hex() string {
return string(d[d.sepIndex()+1:])
}
func (d Digest) String() string {
return string(d)
}
func (d Digest) sepIndex() int {
i := strings.Index(string(d), ":")
if i < 0 {
panic("could not find ':' in digest: " + d)
}
return i
}
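
The deleted digest.go moves to github.com/opencontainers/go-digest, which this commit starts vendoring. A rough sketch of the replacement calls, assuming the go-digest API of that era (FromBytes, Parse):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// FromBytes replaces the removed distribution/digest helper of the same name.
	d := digest.FromBytes([]byte("hello world"))
	fmt.Println(d.Algorithm(), d.Hex())

	// Parse replaces ParseDigest and validates the "algorithm:hex" form.
	if _, err := digest.Parse("sha256:not-hex"); err != nil {
		fmt.Println("invalid digest:", err)
	}
}
```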


@ -1,155 +0,0 @@
package digest
import (
"crypto"
"fmt"
"hash"
"io"
)
// Algorithm identifies and implementation of a digester by an identifier.
// Note the that this defines both the hash algorithm used and the string
// encoding.
type Algorithm string
// supported digest types
const (
SHA256 Algorithm = "sha256" // sha256 with hex encoding
SHA384 Algorithm = "sha384" // sha384 with hex encoding
SHA512 Algorithm = "sha512" // sha512 with hex encoding
// Canonical is the primary digest algorithm used with the distribution
// project. Other digests may be used but this one is the primary storage
// digest.
Canonical = SHA256
)
var (
// TODO(stevvooe): Follow the pattern of the standard crypto package for
// registration of digests. Effectively, we are a registerable set and
// common symbol access.
// algorithms maps values to hash.Hash implementations. Other algorithms
// may be available but they cannot be calculated by the digest package.
algorithms = map[Algorithm]crypto.Hash{
SHA256: crypto.SHA256,
SHA384: crypto.SHA384,
SHA512: crypto.SHA512,
}
)
// Available returns true if the digest type is available for use. If this
// returns false, New and Hash will return nil.
func (a Algorithm) Available() bool {
h, ok := algorithms[a]
if !ok {
return false
}
// check availability of the hash, as well
return h.Available()
}
func (a Algorithm) String() string {
return string(a)
}
// Size returns number of bytes returned by the hash.
func (a Algorithm) Size() int {
h, ok := algorithms[a]
if !ok {
return 0
}
return h.Size()
}
// Set implemented to allow use of Algorithm as a command line flag.
func (a *Algorithm) Set(value string) error {
if value == "" {
*a = Canonical
} else {
// just do a type conversion, support is queried with Available.
*a = Algorithm(value)
}
return nil
}
// New returns a new digester for the specified algorithm. If the algorithm
// does not have a digester implementation, nil will be returned. This can be
// checked by calling Available before calling New.
func (a Algorithm) New() Digester {
return &digester{
alg: a,
hash: a.Hash(),
}
}
// Hash returns a new hash as used by the algorithm. If not available, the
// method will panic. Check Algorithm.Available() before calling.
func (a Algorithm) Hash() hash.Hash {
if !a.Available() {
// NOTE(stevvooe): A missing hash is usually a programming error that
// must be resolved at compile time. We don't import in the digest
// package to allow users to choose their hash implementation (such as
// when using stevvooe/resumable or a hardware accelerated package).
//
// Applications that may want to resolve the hash at runtime should
// call Algorithm.Available before call Algorithm.Hash().
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
}
return algorithms[a].New()
}
// FromReader returns the digest of the reader using the algorithm.
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
digester := a.New()
if _, err := io.Copy(digester.Hash(), rd); err != nil {
return "", err
}
return digester.Digest(), nil
}
// FromBytes digests the input and returns a Digest.
func (a Algorithm) FromBytes(p []byte) Digest {
digester := a.New()
if _, err := digester.Hash().Write(p); err != nil {
// Writes to a Hash should never fail. None of the existing
// hash implementations in the stdlib or hashes vendored
// here can return errors from Write. Having a panic in this
// condition instead of having FromBytes return an error value
// avoids unnecessary error handling paths in all callers.
panic("write to hash function returned error: " + err.Error())
}
return digester.Digest()
}
// TODO(stevvooe): Allow resolution of verifiers using the digest type and
// this registration system.
// Digester calculates the digest of written data. Writes should go directly
// to the return value of Hash, while calling Digest will return the current
// value of the digest.
type Digester interface {
Hash() hash.Hash // provides direct access to underlying hash instance.
Digest() Digest
}
// digester provides a simple digester definition that embeds a hasher.
type digester struct {
alg Algorithm
hash hash.Hash
}
func (d *digester) Hash() hash.Hash {
return d.hash
}
func (d *digester) Digest() Digest {
return NewDigest(d.alg, d.hash)
}
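
The algorithm helpers likewise move to go-digest. A sketch of the streaming path, assuming go-digest's Algorithm.Digester (the counterpart of the removed Algorithm.New):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// digest.Canonical.Digester() plays the role of the removed Algorithm.New().
	digester := digest.Canonical.Digester()
	if _, err := io.Copy(digester.Hash(), strings.NewReader("layer data")); err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest()) // sha256:...
}
```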


@ -1,42 +0,0 @@
// Package digest provides a generalized type to opaquely represent message
// digests and their operations within the registry. The Digest type is
// designed to serve as a flexible identifier in a content-addressable system.
// More importantly, it provides tools and wrappers to work with
// hash.Hash-based digests with little effort.
//
// Basics
//
// The format of a digest is simply a string with two parts, dubbed the
// "algorithm" and the "digest", separated by a colon:
//
// <algorithm>:<digest>
//
// An example of a sha256 digest representation follows:
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// In this case, the string "sha256" is the algorithm and the hex bytes are
// the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
// standard equality operator.
//
// Verification
//
// The main benefit of using the Digest type is simple verification against a
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
// interface, provides a common write sink for digest verification. After
// writing is complete, calling the Verifier.Verified method will indicate
// whether or not the stream of bytes matches the target digest.
//
// Missing Features
//
// In addition to the above, we intend to add the following features to this
// package:
//
// 1. A Digester type that supports write sink digest calculation.
//
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
//
package digest


@ -1,44 +0,0 @@
package digest
import (
"hash"
"io"
)
// Verifier presents a general verification interface to be used with message
// digests and other byte stream verifications. Users instantiate a Verifier
// from one of the various methods, write the data under test to it then check
// the result with the Verified method.
type Verifier interface {
io.Writer
// Verified will return true if the content written to Verifier matches
// the digest.
Verified() bool
}
// NewDigestVerifier returns a verifier that compares the written bytes
// against a passed in digest.
func NewDigestVerifier(d Digest) (Verifier, error) {
if err := d.Validate(); err != nil {
return nil, err
}
return hashVerifier{
hash: d.Algorithm().Hash(),
digest: d,
}, nil
}
type hashVerifier struct {
digest Digest
hash hash.Hash
}
func (hv hashVerifier) Write(p []byte) (n int, err error) {
return hv.hash.Write(p)
}
func (hv hashVerifier) Verified() bool {
return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
}
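
Verification also has a go-digest counterpart. A sketch assuming Digest.Verifier, which replaces the removed NewDigestVerifier:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	d := digest.FromString("hello")

	v := d.Verifier()
	if _, err := io.Copy(v, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	fmt.Println(v.Verified()) // true
}
```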


@ -1,10 +1,12 @@
package digest package digestset
import ( import (
"errors" "errors"
"sort" "sort"
"strings" "strings"
"sync" "sync"
digest "github.com/opencontainers/go-digest"
) )
var ( var (
@ -44,7 +46,7 @@ func NewSet() *Set {
// values or short values. This function does not test equality, // values or short values. This function does not test equality,
// rather whether the second value could match against the first // rather whether the second value could match against the first
// value. // value.
func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
if len(hex) == len(shortHex) { if len(hex) == len(shortHex) {
if hex != shortHex { if hex != shortHex {
return false return false
@ -64,7 +66,7 @@ func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
// If no digests could be found ErrDigestNotFound will be returned // If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found // with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value. // ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (Digest, error) { func (dst *Set) Lookup(d string) (digest.Digest, error) {
dst.mutex.RLock() dst.mutex.RLock()
defer dst.mutex.RUnlock() defer dst.mutex.RUnlock()
if len(dst.entries) == 0 { if len(dst.entries) == 0 {
@ -72,11 +74,11 @@ func (dst *Set) Lookup(d string) (Digest, error) {
} }
var ( var (
searchFunc func(int) bool searchFunc func(int) bool
alg Algorithm alg digest.Algorithm
hex string hex string
) )
dgst, err := ParseDigest(d) dgst, err := digest.Parse(d)
if err == ErrDigestInvalidFormat { if err == digest.ErrDigestInvalidFormat {
hex = d hex = d
searchFunc = func(i int) bool { searchFunc = func(i int) bool {
return dst.entries[i].val >= d return dst.entries[i].val >= d
@ -108,7 +110,7 @@ func (dst *Set) Lookup(d string) (Digest, error) {
// Add adds the given digest to the set. An error will be returned // Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the // if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op. // set, this operation will be a no-op.
func (dst *Set) Add(d Digest) error { func (dst *Set) Add(d digest.Digest) error {
if err := d.Validate(); err != nil { if err := d.Validate(); err != nil {
return err return err
} }
@ -139,7 +141,7 @@ func (dst *Set) Add(d Digest) error {
// Remove removes the given digest from the set. An err will be // Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does // returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op. // not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d Digest) error { func (dst *Set) Remove(d digest.Digest) error {
if err := d.Validate(); err != nil { if err := d.Validate(); err != nil {
return err return err
} }
@ -167,10 +169,10 @@ func (dst *Set) Remove(d Digest) error {
} }
// All returns all the digests in the set // All returns all the digests in the set
func (dst *Set) All() []Digest { func (dst *Set) All() []digest.Digest {
dst.mutex.RLock() dst.mutex.RLock()
defer dst.mutex.RUnlock() defer dst.mutex.RUnlock()
retValues := make([]Digest, len(dst.entries)) retValues := make([]digest.Digest, len(dst.entries))
for i := range dst.entries { for i := range dst.entries {
retValues[i] = dst.entries[i].digest retValues[i] = dst.entries[i].digest
} }
@ -183,10 +185,10 @@ func (dst *Set) All() []Digest {
// entire value of digest if uniqueness cannot be achieved without the // entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short // full value. This function will attempt to make short codes as short
// as possible to be unique. // as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[Digest]string { func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
dst.mutex.RLock() dst.mutex.RLock()
defer dst.mutex.RUnlock() defer dst.mutex.RUnlock()
m := make(map[Digest]string, len(dst.entries)) m := make(map[digest.Digest]string, len(dst.entries))
l := length l := length
resetIdx := 0 resetIdx := 0
for i := 0; i < len(dst.entries); i++ { for i := 0; i < len(dst.entries); i++ {
@ -222,9 +224,9 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string {
} }
type digestEntry struct { type digestEntry struct {
alg Algorithm alg digest.Algorithm
val string val string
digest Digest digest digest.Digest
} }
type digestEntries []*digestEntry type digestEntries []*digestEntry
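
The set now lives in the digestset package and stores go-digest values. A minimal sketch of the new import paths and short-prefix lookup, based on the API in this hunk:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()
	d := digest.FromString("some manifest payload")
	if err := set.Add(d); err != nil {
		panic(err)
	}

	// An unambiguous hex prefix resolves back to the full digest.
	full, err := set.Lookup(d.Hex()[:12])
	if err != nil {
		panic(err)
	}
	fmt.Println(full == d) // true
}
```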


@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
// ErrAccessDenied is returned when an access to a requested resource is // ErrAccessDenied is returned when an access to a requested resource is


@ -5,20 +5,25 @@ import (
"mime" "mime"
"github.com/docker/distribution/context" "github.com/docker/distribution/context"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
// Manifest represents a registry object specifying a set of // Manifest represents a registry object specifying a set of
// references and an optional target // references and an optional target
type Manifest interface { type Manifest interface {
// References returns a list of objects which make up this manifest. // References returns a list of objects which make up this manifest.
// The references are strictly ordered from base to head. A reference // A reference is anything which can be represented by a
// is anything which can be represented by a distribution.Descriptor // distribution.Descriptor. These can consist of layers, resources or other
// manifests.
//
// While no particular order is required, implementations should return
// them from highest to lowest priority. For example, one might want to
// return the base layer before the top layer.
References() []Descriptor References() []Descriptor
// Payload provides the serialized format of the manifest, in addition to // Payload provides the serialized format of the manifest, in addition to
// the media type. // the media type.
Payload() (mediatype string, payload []byte, err error) Payload() (mediaType string, payload []byte, err error)
} }
// ManifestBuilder creates a manifest allowing one to include dependencies. // ManifestBuilder creates a manifest allowing one to include dependencies.
@ -36,6 +41,9 @@ type ManifestBuilder interface {
// AppendReference includes the given object in the manifest after any // AppendReference includes the given object in the manifest after any
// existing dependencies. If the add fails, such as when adding an // existing dependencies. If the add fails, such as when adding an
// unsupported dependency, an error may be returned. // unsupported dependency, an error may be returned.
//
// The destination of the reference is dependent on the manifest type and
// the dependency type.
AppendReference(dependency Describable) error AppendReference(dependency Describable) error
} }
@ -86,20 +94,20 @@ var mappings = make(map[string]UnmarshalFunc, 0)
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of // Need to look up by the actual media type, not the raw contents of
// the header. Strip semicolons and anything following them. // the header. Strip semicolons and anything following them.
var mediatype string var mediaType string
if ctHeader != "" { if ctHeader != "" {
var err error var err error
mediatype, _, err = mime.ParseMediaType(ctHeader) mediaType, _, err = mime.ParseMediaType(ctHeader)
if err != nil { if err != nil {
return nil, Descriptor{}, err return nil, Descriptor{}, err
} }
} }
unmarshalFunc, ok := mappings[mediatype] unmarshalFunc, ok := mappings[mediaType]
if !ok { if !ok {
unmarshalFunc, ok = mappings[""] unmarshalFunc, ok = mappings[""]
if !ok { if !ok {
return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
} }
} }
@ -108,10 +116,10 @@ func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error)
// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This // RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
// should be called from specific // should be called from specific
func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
if _, ok := mappings[mediatype]; ok { if _, ok := mappings[mediaType]; ok {
return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
} }
mappings[mediatype] = u mappings[mediaType] = u
return nil return nil
} }
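
On the consumer side, UnmarshalManifest picks a registered UnmarshalFunc by media type. A sketch, assuming the vendored schema2 package still registers itself via RegisterManifestSchema in an init function:

```go
package manifestdecode

import (
	"github.com/docker/distribution"
	// Imported for its side effect: registering the schema2 media type
	// with RegisterManifestSchema (assumed behaviour of the vendored package).
	_ "github.com/docker/distribution/manifest/schema2"
)

// decode selects the right UnmarshalFunc from a Content-Type header value.
func decode(contentType string, payload []byte) (distribution.Manifest, distribution.Descriptor, error) {
	return distribution.UnmarshalManifest(contentType, payload)
}
```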


@ -0,0 +1,42 @@
package reference
import "path"
// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
if _, ok := ref.(NamedTagged); ok {
return false
}
if _, ok := ref.(Canonical); ok {
return false
}
return true
}
// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().Name()
}
return ref.Name()
}
// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
if nn, ok := ref.(normalizedNamed); ok {
return nn.Familiar().String()
}
return ref.String()
}
// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
matched, err := path.Match(pattern, FamiliarString(ref))
if namedRef, isNamed := ref.(Named); isNamed && !matched {
matched, _ = path.Match(pattern, FamiliarName(namedRef))
}
return matched, err
}
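
A small sketch of the new Familiar helpers in use, relying only on the functions added in this file and on ParseNormalizedNamed from the next one:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("docker.io/library/redis:3.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarName(named))   // redis
	fmt.Println(reference.FamiliarString(named)) // redis:3.2

	// FamiliarMatch applies path.Match patterns to the familiar form.
	ok, _ := reference.FamiliarMatch("redis*", named)
	fmt.Println(ok) // true
}
```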


@ -0,0 +1,171 @@
package reference
import (
"errors"
"fmt"
"strings"
"github.com/docker/distribution/digestset"
"github.com/opencontainers/go-digest"
)
var (
legacyDefaultDomain = "index.docker.io"
defaultDomain = "docker.io"
officialRepoName = "library"
defaultTag = "latest"
)
// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
Named
Familiar() Named
}
// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
if ok := anchoredIdentifierRegexp.MatchString(s); ok {
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
}
domain, remainder := splitDockerDomain(s)
var remoteName string
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
remoteName = remainder[:tagSep]
} else {
remoteName = remainder
}
if strings.ToLower(remoteName) != remoteName {
return nil, errors.New("invalid reference format: repository name must be lowercase")
}
ref, err := Parse(domain + "/" + remainder)
if err != nil {
return nil, err
}
named, isNamed := ref.(Named)
if !isNamed {
return nil, fmt.Errorf("reference %s has no name", ref.String())
}
return named, nil
}
// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
domain, remainder = defaultDomain, name
} else {
domain, remainder = name[:i], name[i+1:]
}
if domain == legacyDefaultDomain {
domain = defaultDomain
}
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder
}
return
}
// familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
repo := repository{
domain: named.Domain(),
path: named.Path(),
}
if repo.domain == defaultDomain {
repo.domain = ""
// Handle official repositories which have the pattern "library/<official repo name>"
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
repo.path = split[1]
}
}
return repo
}
func (r reference) Familiar() Named {
return reference{
namedRepository: familiarizeName(r.namedRepository),
tag: r.tag,
digest: r.digest,
}
}
func (r repository) Familiar() Named {
return familiarizeName(r)
}
func (t taggedReference) Familiar() Named {
return taggedReference{
namedRepository: familiarizeName(t.namedRepository),
tag: t.tag,
}
}
func (c canonicalReference) Familiar() Named {
return canonicalReference{
namedRepository: familiarizeName(c.namedRepository),
digest: c.digest,
}
}
// EnsureTagged adds the default tag "latest" to a reference if it only has
// a repo name.
func EnsureTagged(ref Named) NamedTagged {
namedTagged, ok := ref.(NamedTagged)
if !ok {
namedTagged, err := WithTag(ref, defaultTag)
if err != nil {
// Default tag must be valid, to create a NamedTagged
// type with non-validated input the WithTag function
// should be used instead
panic(err)
}
return namedTagged
}
return namedTagged
}
// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
return digestReference("sha256:" + ref), nil
}
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
return ParseNormalizedNamed(ref)
}
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
dgst, err := ds.Lookup(ref)
if err == nil {
return digestReference(dgst), nil
}
} else {
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
}
return ParseNormalizedNamed(ref)
}
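
A sketch of how ParseAnyReference dispatches between identifiers and names, following the code above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
)

func main() {
	// A bare 64-character hex string is treated as a sha256 identifier...
	id := strings.Repeat("a", 64)
	ref, err := reference.ParseAnyReference(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref) // sha256:aaaa...

	// ...anything else goes through ParseNormalizedNamed.
	ref, err = reference.ParseAnyReference("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref) // docker.io/library/busybox
}
```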


@ -4,11 +4,11 @@
// Grammar // Grammar
// //
// reference := name [ ":" tag ] [ "@" digest ] // reference := name [ ":" tag ] [ "@" digest ]
// name := [hostname '/'] component ['/' component]* // name := [domain '/'] path-component ['/' path-component]*
// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] // domain := domain-component ['.' domain-component]* [':' port-number]
// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/ // port-number := /[0-9]+/
// component := alpha-numeric [separator alpha-numeric]* // path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/ // alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/ // separator := /[_.]|__|[-]*/
// //
@ -19,13 +19,17 @@
// digest-algorithm-separator := /[+.-_]/ // digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
// identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference package reference
import ( import (
"errors" "errors"
"fmt" "fmt"
"strings"
"github.com/docker/distribution/digest" "github.com/opencontainers/go-digest"
) )
const ( const (
@ -43,11 +47,17 @@ var (
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
ErrDigestInvalidFormat = errors.New("invalid digest format") ErrDigestInvalidFormat = errors.New("invalid digest format")
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
// ErrNameEmpty is returned for empty, invalid repository names. // ErrNameEmpty is returned for empty, invalid repository names.
ErrNameEmpty = errors.New("repository name must have at least one component") ErrNameEmpty = errors.New("repository name must have at least one component")
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
// ErrNameNotCanonical is returned when a name is not canonical.
ErrNameNotCanonical = errors.New("repository name must be canonical")
) )
// Reference is an opaque object reference identifier that may include // Reference is an opaque object reference identifier that may include
@ -121,23 +131,56 @@ type Digested interface {
} }
// Canonical reference is an object with a fully unique // Canonical reference is an object with a fully unique
// name including a name with hostname and digest // name including a name with domain and digest
type Canonical interface { type Canonical interface {
Named Named
Digest() digest.Digest Digest() digest.Digest
} }
// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
Named
Domain() string
Path() string
}
// Domain returns the domain part of the Named reference
func Domain(named Named) string {
if r, ok := named.(namedRepository); ok {
return r.Domain()
}
domain, _ := splitDomain(named.Name())
return domain
}
// Path returns the name without the domain part of the Named reference
func Path(named Named) (name string) {
if r, ok := named.(namedRepository); ok {
return r.Path()
}
_, path := splitDomain(named.Name())
return path
}
func splitDomain(name string) (string, string) {
match := anchoredNameRegexp.FindStringSubmatch(name)
if len(match) != 3 {
return "", name
}
return match[1], match[2]
}
// SplitHostname splits a named reference into a // SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is // hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value // found, the hostname is empty and the full value
// is returned as name // is returned as name
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) { func SplitHostname(named Named) (string, string) {
name := named.Name() if r, ok := named.(namedRepository); ok {
match := anchoredNameRegexp.FindStringSubmatch(name) return r.Domain(), r.Path()
if match == nil || len(match) != 3 {
return "", name
} }
return match[1], match[2] return splitDomain(named.Name())
} }
// Parse parses s and returns a syntactically valid Reference. // Parse parses s and returns a syntactically valid Reference.
@ -149,7 +192,9 @@ func Parse(s string) (Reference, error) {
if s == "" { if s == "" {
return nil, ErrNameEmpty return nil, ErrNameEmpty
} }
// TODO(dmcgowan): Provide more specific and helpful error if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
return nil, ErrNameContainsUppercase
}
return nil, ErrReferenceInvalidFormat return nil, ErrReferenceInvalidFormat
} }
@ -157,13 +202,24 @@ func Parse(s string) (Reference, error) {
return nil, ErrNameTooLong return nil, ErrNameTooLong
} }
var repo repository
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
if nameMatch != nil && len(nameMatch) == 3 {
repo.domain = nameMatch[1]
repo.path = nameMatch[2]
} else {
repo.domain = ""
repo.path = matches[1]
}
ref := reference{ ref := reference{
name: matches[1], namedRepository: repo,
tag: matches[2], tag: matches[2],
} }
if matches[3] != "" { if matches[3] != "" {
var err error var err error
ref.digest, err = digest.ParseDigest(matches[3]) ref.digest, err = digest.Parse(matches[3])
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -178,18 +234,17 @@ func Parse(s string) (Reference, error) {
} }
// ParseNamed parses s and returns a syntactically valid reference implementing // ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name, otherwise an error is // the Named interface. The reference must have a name and be in the canonical
// returned. // form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference. // If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests. // NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) { func ParseNamed(s string) (Named, error) {
ref, err := Parse(s) named, err := ParseNormalizedNamed(s)
if err != nil { if err != nil {
return nil, err return nil, err
} }
named, isNamed := ref.(Named) if named.String() != s {
if !isNamed { return nil, ErrNameNotCanonical
return nil, fmt.Errorf("reference %s has no name", ref.String())
} }
return named, nil return named, nil
} }
@ -200,10 +255,15 @@ func WithName(name string) (Named, error) {
if len(name) > NameTotalLengthMax { if len(name) > NameTotalLengthMax {
return nil, ErrNameTooLong return nil, ErrNameTooLong
} }
if !anchoredNameRegexp.MatchString(name) {
match := anchoredNameRegexp.FindStringSubmatch(name)
if match == nil || len(match) != 3 {
return nil, ErrReferenceInvalidFormat return nil, ErrReferenceInvalidFormat
} }
return repository(name), nil return repository{
domain: match[1],
path: match[2],
}, nil
} }
// WithTag combines the name from "name" and the tag from "tag" to form a // WithTag combines the name from "name" and the tag from "tag" to form a
@ -212,8 +272,22 @@ func WithTag(name Named, tag string) (NamedTagged, error) {
if !anchoredTagRegexp.MatchString(tag) { if !anchoredTagRegexp.MatchString(tag) {
return nil, ErrTagInvalidFormat return nil, ErrTagInvalidFormat
} }
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if canonical, ok := name.(Canonical); ok {
return reference{
namedRepository: repo,
tag: tag,
digest: canonical.Digest(),
}, nil
}
return taggedReference{ return taggedReference{
name: name.Name(), namedRepository: repo,
tag: tag, tag: tag,
}, nil }, nil
} }
@ -224,14 +298,37 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
if !anchoredDigestRegexp.MatchString(digest.String()) { if !anchoredDigestRegexp.MatchString(digest.String()) {
return nil, ErrDigestInvalidFormat return nil, ErrDigestInvalidFormat
} }
var repo repository
if r, ok := name.(namedRepository); ok {
repo.domain = r.Domain()
repo.path = r.Path()
} else {
repo.path = name.Name()
}
if tagged, ok := name.(Tagged); ok {
return reference{
namedRepository: repo,
tag: tagged.Tag(),
digest: digest,
}, nil
}
return canonicalReference{ return canonicalReference{
name: name.Name(), namedRepository: repo,
digest: digest, digest: digest,
}, nil }, nil
} }
// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
domain, path := SplitHostname(ref)
return repository{
domain: domain,
path: path,
}
}
func getBestReferenceType(ref reference) Reference { func getBestReferenceType(ref reference) Reference {
if ref.name == "" { if ref.Name() == "" {
// Allow digest only references // Allow digest only references
if ref.digest != "" { if ref.digest != "" {
return digestReference(ref.digest) return digestReference(ref.digest)
@ -241,15 +338,15 @@ func getBestReferenceType(ref reference) Reference {
if ref.tag == "" { if ref.tag == "" {
if ref.digest != "" { if ref.digest != "" {
return canonicalReference{ return canonicalReference{
name: ref.name, namedRepository: ref.namedRepository,
digest: ref.digest, digest: ref.digest,
} }
} }
return repository(ref.name) return ref.namedRepository
} }
if ref.digest == "" { if ref.digest == "" {
return taggedReference{ return taggedReference{
name: ref.name, namedRepository: ref.namedRepository,
tag: ref.tag, tag: ref.tag,
} }
} }
@ -258,17 +355,13 @@ func getBestReferenceType(ref reference) Reference {
} }
type reference struct { type reference struct {
name string namedRepository
tag string tag string
digest digest.Digest digest digest.Digest
} }
func (r reference) String() string { func (r reference) String() string {
return r.name + ":" + r.tag + "@" + r.digest.String() return r.Name() + ":" + r.tag + "@" + r.digest.String()
}
func (r reference) Name() string {
return r.name
} }
func (r reference) Tag() string { func (r reference) Tag() string {
@ -279,14 +372,28 @@ func (r reference) Digest() digest.Digest {
return r.digest return r.digest
} }
type repository string type repository struct {
domain string
path string
}
func (r repository) String() string { func (r repository) String() string {
return string(r) return r.Name()
} }
func (r repository) Name() string { func (r repository) Name() string {
return string(r) if r.domain == "" {
return r.path
}
return r.domain + "/" + r.path
}
func (r repository) Domain() string {
return r.domain
}
func (r repository) Path() string {
return r.path
} }
type digestReference digest.Digest type digestReference digest.Digest
@ -300,16 +407,12 @@ func (d digestReference) Digest() digest.Digest {
} }
type taggedReference struct { type taggedReference struct {
name string namedRepository
tag string tag string
} }
func (t taggedReference) String() string { func (t taggedReference) String() string {
return t.name + ":" + t.tag return t.Name() + ":" + t.tag
}
func (t taggedReference) Name() string {
return t.name
} }
func (t taggedReference) Tag() string { func (t taggedReference) Tag() string {
@ -317,16 +420,12 @@ func (t taggedReference) Tag() string {
} }
type canonicalReference struct { type canonicalReference struct {
name string namedRepository
digest digest.Digest digest digest.Digest
} }
func (c canonicalReference) String() string { func (c canonicalReference) String() string {
return c.name + "@" + c.digest.String() return c.Name() + "@" + c.digest.String()
}
func (c canonicalReference) Name() string {
return c.name
} }
func (c canonicalReference) Digest() digest.Digest { func (c canonicalReference) Digest() digest.Digest {
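
A sketch of the reworked repository type's new accessors (Domain, Path, TrimNamed) together with WithTag, using only functions from this file and normalize.go:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("registry.example.com:5000/foo/bar:1.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(reference.Domain(named)) // registry.example.com:5000
	fmt.Println(reference.Path(named))   // foo/bar

	// TrimNamed drops the tag (and any digest), keeping only the repository.
	trimmed := reference.TrimNamed(named)
	fmt.Println(trimmed) // registry.example.com:5000/foo/bar

	// WithTag re-attaches a tag to the trimmed name.
	tagged, err := reference.WithTag(trimmed, "2.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged) // registry.example.com:5000/foo/bar:2.0
}
```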


@ -19,18 +19,18 @@ var (
alphaNumericRegexp, alphaNumericRegexp,
optional(repeated(separatorRegexp, alphaNumericRegexp))) optional(repeated(separatorRegexp, alphaNumericRegexp)))
// hostnameComponentRegexp restricts the registry hostname component of a // domainComponentRegexp restricts the registry domain component of a
// repository name to start with a component as defined by hostnameRegexp // repository name to start with a component as defined by domainRegexp
// and followed by an optional port. // and followed by an optional port.
hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
// hostnameRegexp defines the structure of potential hostname components // domainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is // that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image // allowed by DNS to ensure backwards compatibility with Docker image
// names. // names.
hostnameRegexp = expression( domainRegexp = expression(
hostnameComponentRegexp, domainComponentRegexp,
optional(repeated(literal(`.`), hostnameComponentRegexp)), optional(repeated(literal(`.`), domainComponentRegexp)),
optional(literal(`:`), match(`[0-9]+`))) optional(literal(`:`), match(`[0-9]+`)))
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go. // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
@ -48,17 +48,17 @@ var (
anchoredDigestRegexp = anchored(DigestRegexp) anchoredDigestRegexp = anchored(DigestRegexp)
// NameRegexp is the format for the name component of references. The // NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the hostname and name part omitting // regexp has capturing groups for the domain and name part omitting
// the separating forward slash from either. // the separating forward slash from either.
NameRegexp = expression( NameRegexp = expression(
optional(hostnameRegexp, literal(`/`)), optional(domainRegexp, literal(`/`)),
nameComponentRegexp, nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp))) optional(repeated(literal(`/`), nameComponentRegexp)))
// anchoredNameRegexp is used to parse a name value, capturing the // anchoredNameRegexp is used to parse a name value, capturing the
// hostname and trailing components. // domain and trailing components.
anchoredNameRegexp = anchored( anchoredNameRegexp = anchored(
optional(capture(hostnameRegexp), literal(`/`)), optional(capture(domainRegexp), literal(`/`)),
capture(nameComponentRegexp, capture(nameComponentRegexp,
optional(repeated(literal(`/`), nameComponentRegexp)))) optional(repeated(literal(`/`), nameComponentRegexp))))
@ -68,6 +68,25 @@ var (
ReferenceRegexp = anchored(capture(NameRegexp), ReferenceRegexp = anchored(capture(NameRegexp),
optional(literal(":"), capture(TagRegexp)), optional(literal(":"), capture(TagRegexp)),
optional(literal("@"), capture(DigestRegexp))) optional(literal("@"), capture(DigestRegexp)))
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = match(`([a-f0-9]{64})`)
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
// anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = anchored(IdentifierRegexp)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
) )
// match compiles the string to a regular expression. // match compiles the string to a regular expression.
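
A quick sketch of the newly exported identifier patterns, assuming they behave as defined above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
)

func main() {
	id := strings.Repeat("0", 63) + "f"

	fmt.Println(reference.IdentifierRegexp.MatchString(id))           // true (full 64-hex identifier)
	fmt.Println(reference.ShortIdentifierRegexp.MatchString(id[:12])) // true (6..64 hex prefix)
	fmt.Println(reference.ShortIdentifierRegexp.MatchString("0f"))    // false (shorter than 6)
}
```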


@ -35,7 +35,7 @@ type Namespace interface {
// reference. // reference.
Repository(ctx context.Context, name reference.Named) (Repository, error) Repository(ctx context.Context, name reference.Named) (Repository, error)
// Repositories fills 'repos' with a lexigraphically sorted catalog of repositories // Repositories fills 'repos' with a lexicographically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries // up to the size of 'repos' and returns the value 'n' for the number of entries
// which were filled. 'last' contains an offset in the catalog, and 'err' will be // which were filled. 'last' contains an offset in the catalog, and 'err' will be
// set to io.EOF if there are no more entries to obtain. // set to io.EOF if there are no more entries to obtain.


@ -4,9 +4,9 @@ import (
"net/http" "net/http"
"regexp" "regexp"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/errcode"
"github.com/opencontainers/go-digest"
) )
var ( var (
@ -175,6 +175,27 @@ var (
errcode.ErrorCodeDenied, errcode.ErrorCodeDenied,
}, },
} }
tooManyRequestsDescriptor = ResponseDescriptor{
Name: "Too Many Requests",
StatusCode: http.StatusTooManyRequests,
Description: "The client made too many requests within a time interval.",
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
},
Body: BodyDescriptor{
ContentType: "application/json; charset=utf-8",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeTooManyRequests,
},
}
) )
const ( const (
@ -202,17 +223,6 @@ const (
... ...
] ]
}` }`
unauthorizedErrorsBody = `{
"errors:" [
{
"code": "UNAUTHORIZED",
"message": "access to the requested resource is not authorized",
"detail": ...
},
...
]
}`
) )
// APIDescriptor exports descriptions of the layout of the v2 registry API. // APIDescriptor exports descriptions of the layout of the v2 registry API.
@ -391,6 +401,7 @@ var routeDescriptors = []RouteDescriptor{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
}, },
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -445,6 +456,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
{ {
@ -481,6 +493,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -535,6 +548,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -592,6 +606,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
{ {
Name: "Missing Layer(s)", Name: "Missing Layer(s)",
Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.",
@ -661,6 +676,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
{ {
Name: "Unknown Manifest", Name: "Unknown Manifest",
Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
@ -769,6 +785,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
{ {
@ -843,6 +860,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -909,6 +927,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -993,6 +1012,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
{ {
@ -1039,6 +1059,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
{ {
@ -1103,6 +1124,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -1175,6 +1197,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -1249,6 +1272,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
{ {
@ -1334,6 +1358,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -1424,6 +1449,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
@ -1480,6 +1506,7 @@ var routeDescriptors = []RouteDescriptor{
unauthorizedResponseDescriptor, unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor, repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor, deniedResponseDescriptor,
tooManyRequestsDescriptor,
}, },
}, },
}, },
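
A hypothetical sketch of how a registry handler could answer with the new 429 descriptor's error code; it assumes errcode.ServeJSON and errcode.ErrorCodeTooManyRequests from the vendored errcode package, and the rateLimited wrapper itself is invented for illustration:

```go
package ratelimit

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// rateLimited answers with TOOMANYREQUESTS when the limiter says no.
func rateLimited(allow func(*http.Request) bool, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !allow(r) {
			// ServeJSON writes the JSON errors body described by errorsBody above.
			_ = errcode.ServeJSON(w, errcode.ErrorCodeTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```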


@ -0,0 +1,161 @@
package v2
import (
"fmt"
"regexp"
"strings"
"unicode"
)
var (
// according to rfc7230
reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
)
// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains
// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The
// function parses only the first element of the list, which is set by the very first proxy. It returns a map
// of corresponding key-value pairs and an unparsed slice of the input string.
//
// Examples of Forwarded header values:
//
// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
//
// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
// Following are states of forwarded header parser. Any state could transition to a failure.
const (
// terminating state; can transition to Parameter
stateElement = iota
// terminating state; can transition to KeyValueDelimiter
stateParameter
// can transition to Value
stateKeyValueDelimiter
// can transition to one of { QuotedValue, PairEnd }
stateValue
// can transition to one of { EscapedCharacter, PairEnd }
stateQuotedValue
// can transition to one of { QuotedValue }
stateEscapedCharacter
// terminating state; can transition to one of { Parameter, Element }
statePairEnd
)
var (
parameter string
value string
parse = forwarded[:]
res = map[string]string{}
state = stateElement
)
Loop:
for {
// skip spaces unless in quoted value
if state != stateQuotedValue && state != stateEscapedCharacter {
parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
}
if len(parse) == 0 {
if state != stateElement && state != statePairEnd && state != stateParameter {
return nil, parse, fmt.Errorf("unexpected end of input")
}
// terminating
break
}
switch state {
// terminate at list element delimiter
case stateElement:
if parse[0] == ',' {
parse = parse[1:]
break Loop
}
state = stateParameter
// parse parameter (the key of key-value pair)
case stateParameter:
match := reToken.FindString(parse)
if len(match) == 0 {
return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
}
parameter = strings.ToLower(match)
parse = parse[len(match):]
state = stateKeyValueDelimiter
// parse '='
case stateKeyValueDelimiter:
if parse[0] != '=' {
return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
}
parse = parse[1:]
state = stateValue
// parse value or quoted value
case stateValue:
if parse[0] == '"' {
parse = parse[1:]
state = stateQuotedValue
} else {
value = reToken.FindString(parse)
if len(value) == 0 {
return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
}
if _, exists := res[parameter]; exists {
return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
}
res[parameter] = value
parse = parse[len(value):]
value = ""
state = statePairEnd
}
// parse a part of quoted value until the first backslash
case stateQuotedValue:
match := reQuotedValue.FindString(parse)
value += match
parse = parse[len(match):]
switch {
case len(parse) == 0:
return nil, parse, fmt.Errorf("unterminated quoted string")
case parse[0] == '"':
res[parameter] = value
value = ""
parse = parse[1:]
state = statePairEnd
case parse[0] == '\\':
parse = parse[1:]
state = stateEscapedCharacter
}
// parse escaped character in a quoted string, ignore the backslash
// transition back to QuotedValue state
case stateEscapedCharacter:
c := reEscapedCharacter.FindString(parse)
if len(c) == 0 {
return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
}
value += c
parse = parse[1:]
state = stateQuotedValue
// expect either a new key-value pair, new list or end of input
case statePairEnd:
switch parse[0] {
case ';':
parse = parse[1:]
state = stateParameter
case ',':
state = stateElement
default:
return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
}
}
}
return res, parse, nil
}
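To make the state machine above concrete, here is a small sketch of how the parser handles the second example from the doc comment. It is not part of the vendored change; it assumes a _test.go file in the same v2 package (parseForwardedHeader is unexported), and the expected values are my reading of the code above.

package v2

import "testing"

// Illustrates that only the first proxy's element is parsed and the rest of
// the header is returned unparsed.
func TestParseForwardedHeaderFirstElement(t *testing.T) {
    header := `for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"`

    params, rest, err := parseForwardedHeader(header)
    if err != nil {
        t.Fatal(err)
    }
    if params["for"] != "192.0.2.43:443" || params["host"] != "registry.example.org" {
        t.Errorf("unexpected parameters: %v", params)
    }
    // The second element (set by the next proxy) is handed back to the caller.
    if want := ` for="10.10.05.40:80"`; rest != want {
        t.Errorf("unexpected rest: %q", rest)
    }
}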

View File

@ -1,8 +1,10 @@
package v2
import (
+"net"
"net/http"
"net/url"
+"strconv"
"strings"
"github.com/docker/distribution/reference"
@ -49,10 +51,14 @@ func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
var scheme string
forwardedProto := r.Header.Get("X-Forwarded-Proto")
+// TODO: log the error
+forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded"))
switch {
case len(forwardedProto) > 0:
scheme = forwardedProto
+case len(forwardedHeader["proto"]) > 0:
+scheme = forwardedHeader["proto"]
case r.TLS != nil:
scheme = "https"
case len(r.URL.Scheme) > 0:
@ -62,14 +68,46 @@ func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
}
host := r.Host
-forwardedHost := r.Header.Get("X-Forwarded-Host")
-if len(forwardedHost) > 0 {
+if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
// comma-separated list of hosts, to which each proxy appends the
// requested host. We want to grab the first from this comma-separated
// list.
hosts := strings.SplitN(forwardedHost, ",", 2)
host = strings.TrimSpace(hosts[0])
+} else if addr, exists := forwardedHeader["for"]; exists {
+host = addr
+} else if h, exists := forwardedHeader["host"]; exists {
+host = h
+}
+portLessHost, port := host, ""
+if !isIPv6Address(portLessHost) {
+// with go 1.6, this would treat the last part of IPv6 address as a port
+portLessHost, port, _ = net.SplitHostPort(host)
+}
+if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 {
+ports := strings.SplitN(forwardedPort, ",", 2)
+forwardedPort = strings.TrimSpace(ports[0])
+if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil {
+port = forwardedPort
+}
+}
+if len(portLessHost) > 0 {
+host = portLessHost
+}
+if len(port) > 0 {
+// remove enclosing brackets of ipv6 address otherwise they will be duplicated
+if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
+host = host[1 : len(host)-1]
+}
+// JoinHostPort properly encloses ipv6 addresses in square brackets
+host = net.JoinHostPort(host, port)
+} else if isIPv6Address(host) && host[0] != '[' {
+// ipv6 needs to be enclosed in square brackets in urls
+host = "[" + host + "]"
}
basePath := routeDescriptorsMap[RouteNameBase].Path
@ -249,3 +287,28 @@ func appendValues(u string, values ...url.Values) string {
return appendValuesURL(up, values...).String()
}
// isIPv6Address returns true if given string is a valid IPv6 address. No port is allowed. The address may be
// enclosed in square brackets.
func isIPv6Address(host string) bool {
if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
// The IPv6 scoped addressing zone identifier starts after the last percent sign.
if i := strings.LastIndexByte(host, '%'); i > 0 {
host = host[:i]
}
ip := net.ParseIP(host)
if ip == nil {
return false
}
if ip.To16() == nil {
return false
}
if ip.To4() == nil {
return true
}
// dot can be present in ipv4-mapped address, it needs to come after a colon though
i := strings.IndexAny(host, ":.")
return i >= 0 && host[i] == ':'
}
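Taken together, the scheme, host and port handling above lets a registry behind an RFC 7239-aware proxy be addressed correctly, including over IPv6. A rough, illustrative sketch of the effect (not vendored code; the import alias, header values and expected output are assumptions based on the logic above):

package main

import (
    "fmt"
    "net/http"

    v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
    // A request that reached the registry through a TLS-terminating proxy
    // which sets an RFC 7239 Forwarded header rather than X-Forwarded-Proto.
    req, err := http.NewRequest("GET", "/v2/", nil)
    if err != nil {
        panic(err)
    }
    req.Host = "registry.internal:5000"
    req.Header.Set("Forwarded", `for="[2001:db8::17]";proto=https`)
    req.Header.Set("X-Forwarded-Port", "5001")

    base, err := v2.NewURLBuilderFromRequest(req, false).BuildBaseURL()
    if err != nil {
        panic(err)
    }
    // Per the logic above: forwarded scheme, bracketed IPv6 "for" address,
    // first X-Forwarded-Port value -> https://[2001:db8::17]:5001/v2/
    fmt.Println(base)
}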

View File

@ -0,0 +1,27 @@
package challenge
import (
"net/url"
"strings"
)
// FROM: https://golang.org/src/net/http/http.go
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
// FROM: http://golang.org/src/net/http/transport.go
var portMap = map[string]string{
"http": "80",
"https": "443",
}
// canonicalAddr returns url.Host but always with a ":port" suffix
// FROM: http://golang.org/src/net/http/transport.go
func canonicalAddr(url *url.URL) string {
addr := url.Host
if !hasPort(addr) {
return addr + ":" + portMap[url.Scheme]
}
return addr
}
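Both helpers are unexported, so a quick illustration would live in a _test.go file of the same challenge package; the host names are made up. A sketch of the intended behaviour:

package challenge

import (
    "net/url"
    "testing"
)

func TestCanonicalAddrAddsDefaultPort(t *testing.T) {
    u, _ := url.Parse("https://registry.example.org/v2/")
    if got := canonicalAddr(u); got != "registry.example.org:443" {
        t.Errorf("expected the default https port to be appended, got %q", got)
    }

    u, _ = url.Parse("http://registry.example.org:5000")
    if got := canonicalAddr(u); got != "registry.example.org:5000" {
        t.Errorf("expected an explicit port to be kept, got %q", got)
    }
}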

View File

@ -0,0 +1,237 @@
package challenge
import (
"fmt"
"net/http"
"net/url"
"strings"
"sync"
)
// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
// Scheme is the auth-scheme according to RFC 2617
Scheme string
// Parameters are the auth-params according to RFC 2617
Parameters map[string]string
}
// Manager manages the challenges for endpoints.
// The challenges are pulled out of HTTP responses. Only
// responses which expect challenges should be added to
// the manager, since a non-unauthorized request will be
// viewed as not requiring challenges.
type Manager interface {
// GetChallenges returns the challenges for the given
// endpoint URL.
GetChallenges(endpoint url.URL) ([]Challenge, error)
// AddResponse adds the response to the challenge
// manager. The challenges will be parsed out of
// the WWW-Authenticate headers and added to the
// URL which produced the response. If the
// response was authorized, any challenges for the
// endpoint will be cleared.
AddResponse(resp *http.Response) error
}
// NewSimpleManager returns an instance of
// Manager which only maps endpoints to challenges
// based on the responses which have been added to the
// manager. The simple manager will make no attempt to
// perform requests on the endpoints or cache the responses
// to a backend.
func NewSimpleManager() Manager {
return &simpleManager{
Challanges: make(map[string][]Challenge),
}
}
type simpleManager struct {
sync.RWMutex
Challanges map[string][]Challenge
}
func normalizeURL(endpoint *url.URL) {
endpoint.Host = strings.ToLower(endpoint.Host)
endpoint.Host = canonicalAddr(endpoint)
}
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
normalizeURL(&endpoint)
m.RLock()
defer m.RUnlock()
challenges := m.Challanges[endpoint.String()]
return challenges, nil
}
func (m *simpleManager) AddResponse(resp *http.Response) error {
challenges := ResponseChallenges(resp)
if resp.Request == nil {
return fmt.Errorf("missing request reference")
}
urlCopy := url.URL{
Path: resp.Request.URL.Path,
Host: resp.Request.URL.Host,
Scheme: resp.Request.URL.Scheme,
}
normalizeURL(&urlCopy)
m.Lock()
defer m.Unlock()
m.Challanges[urlCopy.String()] = challenges
return nil
}
// Octet types from RFC 2616.
type octetType byte
var octetTypes [256]octetType
const (
isToken octetType = 1 << iota
isSpace
)
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}
// ResponseChallenges returns a list of authorization challenges
// for the given http Response. Challenges are only checked if
// the response status code was a 401.
func ResponseChallenges(resp *http.Response) []Challenge {
if resp.StatusCode == http.StatusUnauthorized {
// Parse the WWW-Authenticate Header and store the challenges
// on this endpoint object.
return parseAuthHeader(resp.Header)
}
return nil
}
func parseAuthHeader(header http.Header) []Challenge {
challenges := []Challenge{}
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
if v != "" {
challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
}
}
return challenges
}
func parseValueAndParams(header string) (value string, params map[string]string) {
params = make(map[string]string)
value, s := expectToken(header)
if value == "" {
return
}
value = strings.ToLower(value)
s = "," + skipSpace(s)
for strings.HasPrefix(s, ",") {
var pkey string
pkey, s = expectToken(skipSpace(s[1:]))
if pkey == "" {
return
}
if !strings.HasPrefix(s, "=") {
return
}
var pvalue string
pvalue, s = expectTokenOrQuoted(s[1:])
if pvalue == "" {
return
}
pkey = strings.ToLower(pkey)
params[pkey] = pvalue
s = skipSpace(s)
}
return
}
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpace == 0 {
break
}
}
return s[i:]
}
func expectToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isToken == 0 {
break
}
}
return s[:i], s[i:]
}
func expectTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return expectToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j++
}
}
return "", ""
}
}
return "", ""
}
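A short usage sketch of the manager and parser above (not part of the vendored change; the realm, service and endpoint values are illustrative):

package main

import (
    "fmt"
    "net/http"
    "net/url"

    "github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
    // A 401 as a registry might return it; header values are made up.
    resp := &http.Response{
        StatusCode: http.StatusUnauthorized,
        Header: http.Header{
            "Www-Authenticate": []string{`Bearer realm="https://auth.example.org/token",service="registry.example.org"`},
        },
        Request: &http.Request{
            URL: &url.URL{Scheme: "https", Host: "registry.example.org", Path: "/v2/"},
        },
    }

    m := challenge.NewSimpleManager()
    if err := m.AddResponse(resp); err != nil {
        panic(err)
    }

    // Endpoints are normalized (lowercased, default port added), so the
    // explicit :443 form maps to the same stored challenges.
    cs, _ := m.GetChallenges(url.URL{Scheme: "https", Host: "registry.example.org:443", Path: "/v2/"})
    for _, c := range cs {
        fmt.Println(c.Scheme, c.Parameters["realm"]) // bearer https://auth.example.org/token
    }
}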

View File

@ -9,6 +9,7 @@ import (
"net/http" "net/http"
"github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/client/auth/challenge"
) )
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty // ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
@ -82,21 +83,52 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
return errors return errors
} }
func makeErrorList(err error) []error {
if errL, ok := err.(errcode.Errors); ok {
return []error(errL)
}
return []error{err}
}
func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}
// HandleErrorResponse returns error parsed from HTTP response for an // HandleErrorResponse returns error parsed from HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An // unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError returned for response code outside of expected // UnexpectedHTTPStatusError returned for response code outside of expected
// range. // range.
func HandleErrorResponse(resp *http.Response) error { func HandleErrorResponse(resp *http.Response) error {
if resp.StatusCode == 401 { if resp.StatusCode >= 400 && resp.StatusCode < 500 {
// Check for OAuth errors within the `WWW-Authenticate` header first
// See https://tools.ietf.org/html/rfc6750#section-3
for _, c := range challenge.ResponseChallenges(resp) {
if c.Scheme == "bearer" {
var err errcode.Error
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
switch c.Parameters["error"] {
case "invalid_token":
err.Code = errcode.ErrorCodeUnauthorized
case "insufficient_scope":
err.Code = errcode.ErrorCodeDenied
default:
continue
}
if description := c.Parameters["error_description"]; description != "" {
err.Message = description
} else {
err.Message = err.Code.Message()
}
return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
}
}
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
} }
return err return err
} }
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
return parseHTTPErrorResponse(resp.StatusCode, resp.Body)
}
return &UnexpectedHTTPStatusError{Status: resp.Status} return &UnexpectedHTTPStatusError{Status: resp.Status}
} }
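A rough sketch of the new code path, assuming the vendored client package; the header and body values are invented, and the exact rendering of the returned error depends on errcode internals not shown here:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"

    "github.com/docker/distribution/registry/client"
)

func main() {
    // A 401 whose WWW-Authenticate header carries an RFC 6750 bearer error.
    resp := &http.Response{
        Status:     "401 Unauthorized",
        StatusCode: http.StatusUnauthorized,
        Header: http.Header{
            "Www-Authenticate": []string{`Bearer realm="https://auth.example.org/token",error="invalid_token",error_description="token has expired"`},
        },
        Body: ioutil.NopCloser(strings.NewReader(`{"errors":[{"code":"UNAUTHORIZED","message":"authentication required"}]}`)),
    }

    // invalid_token maps to errcode.ErrorCodeUnauthorized and is merged with
    // whatever could be parsed out of the JSON body.
    fmt.Println(client.HandleErrorResponse(resp))
}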

View File

@ -15,12 +15,12 @@ import (
"github.com/docker/distribution" "github.com/docker/distribution"
"github.com/docker/distribution/context" "github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/client/transport"
"github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache"
"github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/cache/memory"
"github.com/opencontainers/go-digest"
) )
// Registry provides an interface for calling Repositories, which returns a catalog of repositories. // Registry provides an interface for calling Repositories, which returns a catalog of repositories.
@ -268,7 +268,7 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e
return desc, nil return desc, nil
} }
dgst, err := digest.ParseDigest(digestHeader) dgst, err := digest.Parse(digestHeader)
if err != nil { if err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
@ -301,18 +301,20 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
req, err := http.NewRequest("HEAD", u, nil) newRequest := func(method string) (*http.Response, error) {
req, err := http.NewRequest(method, u, nil)
if err != nil { if err != nil {
return distribution.Descriptor{}, err return nil, err
} }
for _, t := range distribution.ManifestMediaTypes() { for _, t := range distribution.ManifestMediaTypes() {
req.Header.Add("Accept", t) req.Header.Add("Accept", t)
} }
var attempts int
resp, err := t.client.Do(req) resp, err := t.client.Do(req)
check: return resp, err
}
resp, err := newRequest("HEAD")
if err != nil { if err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
@ -321,23 +323,20 @@ check:
switch { switch {
case resp.StatusCode >= 200 && resp.StatusCode < 400: case resp.StatusCode >= 200 && resp.StatusCode < 400:
return descriptorFromResponse(resp) return descriptorFromResponse(resp)
case resp.StatusCode == http.StatusMethodNotAllowed: default:
req, err = http.NewRequest("GET", u, nil) // if the response is an error - there will be no body to decode.
// Issue a GET request:
// - for data from a server that does not handle HEAD
// - to get error details in case of a failure
resp, err = newRequest("GET")
if err != nil { if err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
defer resp.Body.Close()
for _, t := range distribution.ManifestMediaTypes() { if resp.StatusCode >= 200 && resp.StatusCode < 400 {
req.Header.Add("Accept", t) return descriptorFromResponse(resp)
} }
resp, err = t.client.Do(req)
attempts++
if attempts > 1 {
return distribution.Descriptor{}, err
}
goto check
default:
return distribution.Descriptor{}, HandleErrorResponse(resp) return distribution.Descriptor{}, HandleErrorResponse(resp)
} }
} }
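The HEAD-then-GET retry above replaces the old goto-based loop. The same pattern in isolation looks roughly like this generic sketch (headThenGet and its URL are made up, not vendored code):

package main

import (
    "fmt"
    "net/http"
)

// headThenGet issues a HEAD request and falls back to GET when the response
// is not a success, mirroring the shape of tags.Get above: HEAD errors carry
// no body, and some servers do not implement HEAD at all.
func headThenGet(c *http.Client, u string) (*http.Response, error) {
    do := func(method string) (*http.Response, error) {
        req, err := http.NewRequest(method, u, nil)
        if err != nil {
            return nil, err
        }
        return c.Do(req)
    }

    resp, err := do("HEAD")
    if err != nil {
        return nil, err
    }
    if resp.StatusCode >= 200 && resp.StatusCode < 400 {
        return resp, nil
    }
    resp.Body.Close()
    return do("GET")
}

func main() {
    resp, err := headThenGet(http.DefaultClient, "https://registry.example.org/v2/")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}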
@ -476,7 +475,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
return nil, distribution.ErrManifestNotModified
} else if SuccessStatus(resp.StatusCode) {
if contentDgst != nil {
-dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest"))
+dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
if err == nil {
*contentDgst = dgst
}
@ -554,7 +553,7 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options .
if SuccessStatus(resp.StatusCode) {
dgstHeader := resp.Header.Get("Docker-Content-Digest")
-dgst, err := digest.ParseDigest(dgstHeader)
+dgst, err := digest.Parse(dgstHeader)
if err != nil {
return "", err
}
@ -662,7 +661,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
if err != nil {
return distribution.Descriptor{}, err
}
-dgstr := digest.Canonical.New()
+dgstr := digest.Canonical.Digester()
n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
if err != nil {
return distribution.Descriptor{}, err
@ -680,15 +679,6 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
return writer.Commit(ctx, desc)
}
-// createOptions is a collection of blob creation modifiers relevant to general
-// blob storage intended to be configured by the BlobCreateOption.Apply method.
-type createOptions struct {
-Mount struct {
-ShouldMount bool
-From reference.Canonical
-}
-}
type optionFunc func(interface{}) error
func (f optionFunc) Apply(v interface{}) error {
@ -699,7 +689,7 @@ func (f optionFunc) Apply(v interface{}) error {
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
return optionFunc(func(v interface{}) error {
-opts, ok := v.(*createOptions)
+opts, ok := v.(*distribution.CreateOptions)
if !ok {
return fmt.Errorf("unexpected options type: %T", v)
}
@ -712,7 +702,7 @@ func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
}
func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
-var opts createOptions
+var opts distribution.CreateOptions
for _, option := range options {
err := option.Apply(&opts)
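A rough sketch of how a caller now drives the relocated options; tryMount, repo and srcRef are illustrative assumptions, not vendored code:

package example

import (
    "fmt"

    "github.com/docker/distribution"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/client"
)

// tryMount attempts a cross-repository blob mount and reports whether the
// registry satisfied it without an upload. repo and srcRef are assumed to be
// obtained elsewhere (e.g. client.NewRepository and reference.WithDigest).
func tryMount(ctx context.Context, repo distribution.Repository, srcRef reference.Canonical) (bool, error) {
    bw, err := repo.Blobs(ctx).Create(ctx, client.WithMountFrom(srcRef))
    if err == nil {
        // A writer came back: the mount was not possible and a normal upload
        // would have to follow; abandon the session in this sketch.
        _ = bw.Cancel(ctx)
        return false, nil
    }
    if ebm, ok := err.(distribution.ErrBlobMounted); ok {
        fmt.Println("mounted from", ebm.From.String(), "as", ebm.Descriptor.Digest)
        return true, nil
    }
    return false, err
}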

View File

@ -181,6 +181,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
}
+req.Header.Add("Accept-Encoding", "identity")
resp, err := hrs.client.Do(req)
if err != nil {
return nil, err

View File

@ -2,7 +2,7 @@ package cache
import (
"github.com/docker/distribution/context"
-"github.com/docker/distribution/digest"
+"github.com/opencontainers/go-digest"
"github.com/docker/distribution"
)

View File

@ -5,9 +5,9 @@ import (
"github.com/docker/distribution" "github.com/docker/distribution"
"github.com/docker/distribution/context" "github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference" "github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache"
"github.com/opencontainers/go-digest"
) )
type inMemoryBlobDescriptorCacheProvider struct { type inMemoryBlobDescriptorCacheProvider struct {
@ -26,7 +26,7 @@ func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider
} }
func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
if _, err := reference.ParseNamed(repo); err != nil { if _, err := reference.ParseNormalizedNamed(repo); err != nil {
return nil, err return nil, err
} }
@ -77,37 +77,46 @@ type repositoryScopedInMemoryBlobDescriptorCache struct {
} }
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
if rsimbdcp.repository == nil { rsimbdcp.parent.mu.Lock()
repo := rsimbdcp.repository
rsimbdcp.parent.mu.Unlock()
if repo == nil {
return distribution.Descriptor{}, distribution.ErrBlobUnknown return distribution.Descriptor{}, distribution.ErrBlobUnknown
} }
return rsimbdcp.repository.Stat(ctx, dgst) return repo.Stat(ctx, dgst)
} }
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
if rsimbdcp.repository == nil { rsimbdcp.parent.mu.Lock()
repo := rsimbdcp.repository
rsimbdcp.parent.mu.Unlock()
if repo == nil {
return distribution.ErrBlobUnknown return distribution.ErrBlobUnknown
} }
return rsimbdcp.repository.Clear(ctx, dgst) return repo.Clear(ctx, dgst)
} }
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
if rsimbdcp.repository == nil {
// allocate map since we are setting it now.
rsimbdcp.parent.mu.Lock() rsimbdcp.parent.mu.Lock()
repo := rsimbdcp.repository
if repo == nil {
// allocate map since we are setting it now.
var ok bool var ok bool
// have to read back value since we may have allocated elsewhere. // have to read back value since we may have allocated elsewhere.
rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
if !ok { if !ok {
rsimbdcp.repository = newMapBlobDescriptorCache() repo = newMapBlobDescriptorCache()
rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
}
rsimbdcp.repository = repo
} }
rsimbdcp.parent.mu.Unlock() rsimbdcp.parent.mu.Unlock()
}
if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
return err return err
} }