
Bump libcompose and its dependencies

This commit is contained in:
Josh Curl
2016-05-23 17:22:40 -07:00
parent c18cd26e78
commit 50de80d09a
1109 changed files with 35052 additions and 125685 deletions

View File

@@ -1,568 +0,0 @@
package storage
import (
"bytes"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
)
// TestSimpleBlobUpload covers the blob upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleBlobUpload(t *testing.T) {
randomDataReader, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random reader: %v", err)
}
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
driver := inmemory.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs := repository.Blobs(ctx)
h := sha256.New()
rd := io.TeeReader(randomDataReader, h)
blobUpload, err := bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
// Cancel the upload then restart it
if err := blobUpload.Cancel(ctx); err != nil {
t.Fatalf("unexpected error during upload cancellation: %v", err)
}
// Do a resume, get unknown upload
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
if err != distribution.ErrBlobUploadUnknown {
t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
}
// Restart!
blobUpload, err = bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
// Get the size of our random tarfile
randomDataSize, err := seekerSize(randomDataReader)
if err != nil {
t.Fatalf("error getting seeker size of random data: %v", err)
}
nn, err := io.Copy(blobUpload, rd)
if err != nil {
t.Fatalf("unexpected error uploading layer data: %v", err)
}
if nn != randomDataSize {
t.Fatalf("layer data write incomplete")
}
offset, err := blobUpload.Seek(0, os.SEEK_CUR)
if err != nil {
t.Fatalf("unexpected error seeking layer upload: %v", err)
}
if offset != nn {
t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn)
}
blobUpload.Close()
// Do a resume, for good fun
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
if err != nil {
t.Fatalf("unexpected error resuming upload: %v", err)
}
sha256Digest := digest.NewDigest("sha256", h)
desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
if err != nil {
t.Fatalf("unexpected error finishing layer upload: %v", err)
}
// After finishing an upload, it should no longer exist.
if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown {
t.Fatalf("expected layer upload to be unknown, got %v", err)
}
// Test for existence.
statDesc, err := bs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
}
if statDesc != desc {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
rc, err := bs.Open(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error opening blob for read: %v", err)
}
defer rc.Close()
h.Reset()
nn, err = io.Copy(h, rc)
if err != nil {
t.Fatalf("error reading layer: %v", err)
}
if nn != randomDataSize {
t.Fatalf("incorrect read length")
}
if digest.NewDigest("sha256", h) != sha256Digest {
t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
}
// Delete a blob
err = bs.Delete(ctx, desc.Digest)
if err != nil {
t.Fatalf("Unexpected error deleting blob")
}
d, err := bs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating deleted blob: %v", d)
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
}
_, err = bs.Open(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected success opening deleted blob for read")
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type getting deleted manifest: %#v", err)
}
// Re-upload the blob
randomBlob, err := ioutil.ReadAll(randomDataReader)
if err != nil {
t.Fatalf("Error reading all of blob %s", err.Error())
}
expectedDigest := digest.FromBytes(randomBlob)
simpleUpload(t, bs, randomBlob, expectedDigest)
d, err = bs.Stat(ctx, expectedDigest)
if err != nil {
t.Errorf("unexpected error stat-ing blob")
}
if d.Digest != expectedDigest {
t.Errorf("Mismatching digest with restored blob")
}
_, err = bs.Open(ctx, expectedDigest)
if err != nil {
t.Errorf("Unexpected error opening blob")
}
// Reuse state to test delete with a delete-disabled registry
registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err = registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs = repository.Blobs(ctx)
err = bs.Delete(ctx, desc.Digest)
if err == nil {
t.Errorf("Unexpected success deleting while disabled")
}
}
// TestSimpleBlobRead just creates a simple blob file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleBlobRead(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
driver := inmemory.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs := repository.Blobs(ctx)
randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
if err != nil {
t.Fatalf("error creating random data: %v", err)
}
// Test for existence.
desc, err := bs.Stat(ctx, dgst)
if err != distribution.ErrBlobUnknown {
t.Fatalf("expected not found error when testing for existence: %v", err)
}
rc, err := bs.Open(ctx, dgst)
if err != distribution.ErrBlobUnknown {
t.Fatalf("expected not found error when opening non-existent blob: %v", err)
}
randomLayerSize, err := seekerSize(randomLayerReader)
if err != nil {
t.Fatalf("error getting seeker size for random layer: %v", err)
}
descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
t.Logf("desc: %v", descBefore)
desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
if err != nil {
t.Fatalf("error adding blob to blobservice: %v", err)
}
if desc.Size != randomLayerSize {
t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
}
rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
if err != nil {
t.Fatalf("error opening blob with %v: %v", dgst, err)
}
defer rc.Close()
// Now check the sha digest and ensure its the same
h := sha256.New()
nn, err := io.Copy(h, rc)
if err != nil {
t.Fatalf("unexpected error copying to hash: %v", err)
}
if nn != randomLayerSize {
t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize)
}
sha256Digest := digest.NewDigest("sha256", h)
if sha256Digest != desc.Digest {
t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest)
}
// Now seek back the blob, read the whole thing and check against randomLayerData
offset, err := rc.Seek(0, os.SEEK_SET)
if err != nil {
t.Fatalf("error seeking blob: %v", err)
}
if offset != 0 {
t.Fatalf("seek failed: expected 0 offset, got %d", offset)
}
p, err := ioutil.ReadAll(rc)
if err != nil {
t.Fatalf("error reading all of blob: %v", err)
}
if len(p) != int(randomLayerSize) {
t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize)
}
// Reset the randomLayerReader and read back the buffer
_, err = randomLayerReader.Seek(0, os.SEEK_SET)
if err != nil {
t.Fatalf("error resetting layer reader: %v", err)
}
randomLayerData, err := ioutil.ReadAll(randomLayerReader)
if err != nil {
t.Fatalf("random layer read failed: %v", err)
}
if !bytes.Equal(p, randomLayerData) {
t.Fatalf("layer data not equal")
}
}
// TestBlobMount covers the blob mount process, exercising common
// error paths that might be seen during a mount.
func TestBlobMount(t *testing.T) {
randomDataReader, dgst, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("error creating random reader: %v", err)
}
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
sourceImageName, _ := reference.ParseNamed("foo/source")
driver := inmemory.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
sourceRepository, err := registry.Repository(ctx, sourceImageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
sbs := sourceRepository.Blobs(ctx)
blobUpload, err := sbs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting layer upload: %s", err)
}
// Get the size of our random tarfile
randomDataSize, err := seekerSize(randomDataReader)
if err != nil {
t.Fatalf("error getting seeker size of random data: %v", err)
}
nn, err := io.Copy(blobUpload, randomDataReader)
if err != nil {
t.Fatalf("unexpected error uploading layer data: %v", err)
}
desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
if err != nil {
t.Fatalf("unexpected error finishing layer upload: %v", err)
}
// Test for existence.
statDesc, err := sbs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs)
}
if statDesc != desc {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
bs := repository.Blobs(ctx)
// Test destination for existence.
statDesc, err = bs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating unmounted blob: %v", desc)
}
canonicalRef, err := reference.WithDigest(sourceRepository.Name(), desc.Digest)
if err != nil {
t.Fatal(err)
}
bw, err := bs.Create(ctx, WithMountFrom(canonicalRef))
if bw != nil {
t.Fatal("unexpected blobwriter returned from Create call, should mount instead")
}
ebm, ok := err.(distribution.ErrBlobMounted)
if !ok {
t.Fatalf("unexpected error mounting layer: %v", err)
}
if ebm.Descriptor != desc {
t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc)
}
// Test for existence.
statDesc, err = bs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
}
if statDesc != desc {
t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
}
rc, err := bs.Open(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error opening blob for read: %v", err)
}
defer rc.Close()
h := sha256.New()
nn, err = io.Copy(h, rc)
if err != nil {
t.Fatalf("error reading layer: %v", err)
}
if nn != randomDataSize {
t.Fatalf("incorrect read length")
}
if digest.NewDigest("sha256", h) != dgst {
t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst)
}
// Delete the blob from the source repo
err = sbs.Delete(ctx, desc.Digest)
if err != nil {
t.Fatalf("Unexpected error deleting blob")
}
d, err := bs.Stat(ctx, desc.Digest)
if err != nil {
t.Fatalf("unexpected error stating blob deleted from source repository: %v", err)
}
d, err = sbs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating deleted blob: %v", d)
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
}
// Delete the blob from the dest repo
err = bs.Delete(ctx, desc.Digest)
if err != nil {
t.Fatalf("Unexpected error deleting blob")
}
d, err = bs.Stat(ctx, desc.Digest)
if err == nil {
t.Fatalf("unexpected non-error stating deleted blob: %v", d)
}
switch err {
case distribution.ErrBlobUnknown:
break
default:
t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
}
}
// TestLayerUploadZeroLength uploads zero-length
func TestLayerUploadZeroLength(t *testing.T) {
ctx := context.Background()
imageName, _ := reference.ParseNamed("foo/bar")
driver := inmemory.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repository, err := registry.Repository(ctx, imageName)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
bs := repository.Blobs(ctx)
simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar)
}
func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {
ctx := context.Background()
wr, err := bs.Create(ctx)
if err != nil {
t.Fatalf("unexpected error starting upload: %v", err)
}
nn, err := io.Copy(wr, bytes.NewReader(blob))
if err != nil {
t.Fatalf("error copying into blob writer: %v", err)
}
if nn != int64(len(blob)) {
t.Fatalf("unexpected number of bytes copied: %v != %v", nn, len(blob))
}
dgst, err := digest.FromReader(bytes.NewReader(blob))
if err != nil {
t.Fatalf("error getting digest: %v", err)
}
if dgst != expectedDigest {
// sanity check on zero digest
t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
}
desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
if err != nil {
t.Fatalf("unexpected error committing write: %v", err)
}
if desc.Digest != dgst {
t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst)
}
}
// seekerSize seeks to the end of seeker, checks the size and returns it to
// the original state, returning the size. The state of the seeker should be
// treated as unknown if an error is returned.
func seekerSize(seeker io.ReadSeeker) (int64, error) {
current, err := seeker.Seek(0, os.SEEK_CUR)
if err != nil {
return 0, err
}
end, err := seeker.Seek(0, os.SEEK_END)
if err != nil {
return 0, err
}
resumed, err := seeker.Seek(current, os.SEEK_SET)
if err != nil {
return 0, err
}
if resumed != current {
return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
}
return end, nil
}
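For illustration, a minimal self-contained sketch of the same measure-and-restore pattern, using only the standard library (the reader and its contents are stand-ins):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
)

// size measures a stream's total length without disturbing the read
// position, mirroring seekerSize above.
func size(s io.ReadSeeker) (int64, error) {
	cur, err := s.Seek(0, os.SEEK_CUR) // remember the current position
	if err != nil {
		return 0, err
	}
	end, err := s.Seek(0, os.SEEK_END) // jump to the end to learn the size
	if err != nil {
		return 0, err
	}
	_, err = s.Seek(cur, os.SEEK_SET) // restore the original position
	return end, err
}

func main() {
	r := strings.NewReader("hello world")
	r.Seek(6, os.SEEK_SET)
	n, _ := size(r)
	rest, _ := ioutil.ReadAll(r)
	fmt.Println(n, string(rest)) // 11 world
}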
// addBlob simply consumes the reader and inserts into the blob service,
// returning a descriptor on success.
func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) {
wr, err := bs.Create(ctx)
if err != nil {
return distribution.Descriptor{}, err
}
defer wr.Cancel(ctx)
if nn, err := io.Copy(wr, rd); err != nil {
return distribution.Descriptor{}, err
} else if nn != desc.Size {
return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size)
}
return wr.Commit(ctx, desc)
}

View File

@@ -1,60 +0,0 @@
package storage
import (
"expvar"
"sync/atomic"
"github.com/docker/distribution/registry/storage/cache"
)
type blobStatCollector struct {
metrics cache.Metrics
}
func (bsc *blobStatCollector) Hit() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Hits, 1)
}
func (bsc *blobStatCollector) Miss() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Misses, 1)
}
func (bsc *blobStatCollector) Metrics() cache.Metrics {
return bsc.metrics
}
// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
// cache requests. Note this is kept globally and made available via expvar.
// For more detailed metrics, its recommend to instrument a particular cache
// implementation.
var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}
func init() {
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
cache := registry.(*expvar.Map).Get("cache")
if cache == nil {
cache = &expvar.Map{}
cache.(*expvar.Map).Init()
registry.(*expvar.Map).Set("cache", cache)
}
storage := cache.(*expvar.Map).Get("storage")
if storage == nil {
storage = &expvar.Map{}
storage.(*expvar.Map).Init()
cache.(*expvar.Map).Set("storage", storage)
}
storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
// no need for synchronous access: the increments are atomic and
// during reading, we don't care if the data is up to date. The
// numbers will always *eventually* be reported correctly.
return blobStatterCacheMetrics
}))
}
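As a sketch of how such expvar metrics surface over HTTP (the variable name below is illustrative, not part of this code): importing expvar registers a /debug/vars handler on the default mux, and any published variable, like the "registry" cache map above, appears in its JSON output.

package main

import (
	"expvar"
	"net/http"
)

func main() {
	hits := expvar.NewInt("example.hits") // illustrative name
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		hits.Add(1)
		w.Write([]byte("ok"))
	})
	// GET /debug/vars now returns a JSON document that includes
	// "example.hits", just as the blobdescriptor cache metrics appear
	// under the "registry" map.
	http.ListenAndServe(":8080", nil)
}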

View File

@@ -1,78 +0,0 @@
package storage
import (
"fmt"
"net/http"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/driver"
)
// TODO(stevvooe): This should be configurable in the future.
const blobCacheControlMaxAge = 365 * 24 * time.Hour
// blobServer simply serves blobs from a driver instance using a path function
// to identify paths and a descriptor service to fill in metadata.
type blobServer struct {
driver driver.StorageDriver
statter distribution.BlobStatter
pathFn func(dgst digest.Digest) (string, error)
redirect bool // allows disabling URLFor redirects
}
func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return err
}
path, err := bs.pathFn(desc.Digest)
if err != nil {
return err
}
if bs.redirect {
redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
switch err.(type) {
case nil:
// Redirect to storage URL.
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
return err
case driver.ErrUnsupportedMethod:
// Fallback to serving the content directly.
default:
// Some unexpected error.
return err
}
}
br, err := newFileReader(ctx, bs.driver, path, desc.Size)
if err != nil {
return err
}
defer br.Close()
w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))
if w.Header().Get("Docker-Content-Digest") == "" {
w.Header().Set("Docker-Content-Digest", desc.Digest.String())
}
if w.Header().Get("Content-Type") == "" {
// Set the content type if not already set.
w.Header().Set("Content-Type", desc.MediaType)
}
if w.Header().Get("Content-Length") == "" {
// Set the content length if not already set.
w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
}
http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
return nil
}
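A minimal sketch of the direct-serving branch above: immutable, digest-named content served through http.ServeContent with the same ETag and Cache-Control shape (the digest and content are stand-ins):

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	const dgst = "sha256:deadbeef" // stand-in digest
	content := "blob bytes"
	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("ETag", fmt.Sprintf(`"%s"`, dgst)) // If-None-Match handled by ServeContent
		w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", (365 * 24 * time.Hour).Seconds()))
		w.Header().Set("Docker-Content-Digest", dgst)
		// ServeContent provides range requests and conditional GETs for free.
		http.ServeContent(w, r, dgst, time.Time{}, strings.NewReader(content))
	})
	http.ListenAndServe(":8080", nil)
}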

View File

@@ -1,192 +0,0 @@
package storage
import (
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/driver"
)
// blobStore implements the read side of the blob store interface over a
// driver without enforcing per-repository membership. This object is
// intentionally a leaky abstraction, providing utility methods that support
// creating and traversing backend links.
type blobStore struct {
driver driver.StorageDriver
statter distribution.BlobStatter
}
var _ distribution.BlobProvider = &blobStore{}
// Get implements the BlobReadService.Get call.
func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
bp, err := bs.path(dgst)
if err != nil {
return nil, err
}
p, err := bs.driver.GetContent(ctx, bp)
if err != nil {
switch err.(type) {
case driver.PathNotFoundError:
return nil, distribution.ErrBlobUnknown
}
return nil, err
}
return p, err
}
func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return nil, err
}
path, err := bs.path(desc.Digest)
if err != nil {
return nil, err
}
return newFileReader(ctx, bs.driver, path, desc.Size)
}
// Put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests. It is implemented as a convenience for other Put implementations.
func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
dgst := digest.FromBytes(p)
desc, err := bs.statter.Stat(ctx, dgst)
if err == nil {
// content already present
return desc, nil
} else if err != distribution.ErrBlobUnknown {
context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err)
// real error, return it
return distribution.Descriptor{}, err
}
bp, err := bs.path(dgst)
if err != nil {
return distribution.Descriptor{}, err
}
// TODO(stevvooe): Write out mediatype here, as well.
return distribution.Descriptor{
Size: int64(len(p)),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, bs.driver.PutContent(ctx, bp, p)
}
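A minimal sketch of the content-addressable semantics behind Put, with a plain map standing in for the storage driver and path layout: the digest of the bytes is the key, so storing identical content twice is a no-op.

package main

import (
	"crypto/sha256"
	"fmt"
)

type store map[string][]byte

// put is content-addressed: writing the same content twice just returns
// the digest again without storing anything new.
func (s store) put(p []byte) string {
	dgst := fmt.Sprintf("sha256:%x", sha256.Sum256(p))
	if _, ok := s[dgst]; !ok {
		s[dgst] = p
	}
	return dgst
}

func main() {
	s := store{}
	a := s.put([]byte("manifest"))
	b := s.put([]byte("manifest"))
	fmt.Println(a == b, len(s)) // true 1
}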
// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
bp, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return "", err
}
return bp, nil
}
// link links the path to the provided digest by writing the digest into the
// target file. Caller must ensure that the blob actually exists.
func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error {
// The contents of the "link" file are the exact string contents of the
// digest, which is specified in that package.
return bs.driver.PutContent(ctx, path, []byte(dgst))
}
// readlink returns the linked digest at path.
func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
content, err := bs.driver.GetContent(ctx, path)
if err != nil {
return "", err
}
linked, err := digest.ParseDigest(string(content))
if err != nil {
return "", err
}
return linked, nil
}
// resolve reads the digest link at path and returns the blob store path.
func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
dgst, err := bs.readlink(ctx, path)
if err != nil {
return "", err
}
return bs.path(dgst)
}
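A minimal sketch of the link-file scheme these helpers implement (the path layout below is a simplified stand-in, not the real pathFor layout): the link file's entire content is the digest string, so resolving is a read plus a path computation.

package main

import "fmt"

func main() {
	files := map[string][]byte{} // stands in for the storage driver
	dgst := "sha256:deadbeef"    // stand-in digest

	// link: write the exact digest string into the link file.
	linkPath := "/repositories/foo/_layers/" + dgst + "/link"
	files[linkPath] = []byte(dgst)

	// readlink + resolve: read the digest back, derive the blob data path.
	linked := string(files[linkPath])
	fmt.Println("/blobs/" + linked) // simplified stand-in for blobDataPathSpec
}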
type blobStatter struct {
driver driver.StorageDriver
}
var _ distribution.BlobDescriptorService = &blobStatter{}
// Stat implements BlobStatter.Stat by returning the descriptor for the blob
// in the main blob store. If this method returns successfully, there is a
// strong guarantee that the blob exists and is available.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
path, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return distribution.Descriptor{}, err
}
fi, err := bs.driver.Stat(ctx, path)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return distribution.Descriptor{}, distribution.ErrBlobUnknown
default:
return distribution.Descriptor{}, err
}
}
if fi.IsDir() {
// NOTE(stevvooe): This represents a corruption situation. Somehow, we
// calculated a blob path and then detected a directory. We log the
// error and then error on the side of not knowing about the blob.
context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
// TODO(stevvooe): Add method to resolve the mediatype. We can store and
// cache a "global" media type for the blob, even if a specific repo has a
// mediatype that overrides the main one.
return distribution.Descriptor{
Size: fi.Size(),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, nil
}
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
return distribution.ErrUnsupported
}

View File

@@ -1,380 +0,0 @@
package storage
import (
"errors"
"fmt"
"io"
"path"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
var (
errResumableDigestNotAvailable = errors.New("resumable digest not available")
)
// layerWriter is used to control the various aspects of resumable
// layer upload. It implements the LayerUpload interface.
type blobWriter struct {
blobStore *linkedBlobStore
id string
startedAt time.Time
digester digest.Digester
written int64 // track the contiguous write
// implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy
// the LayerUpload interface
bufferedFileWriter
resumableDigestEnabled bool
}
var _ distribution.BlobWriter = &blobWriter{}
// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
return bw.id
}
func (bw *blobWriter) StartedAt() time.Time {
return bw.startedAt
}
// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
context.GetLogger(ctx).Debug("(*blobWriter).Commit")
if err := bw.bufferedFileWriter.Close(); err != nil {
return distribution.Descriptor{}, err
}
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
return distribution.Descriptor{}, err
}
if err := bw.moveBlob(ctx, canonical); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.removeResources(ctx); err != nil {
return distribution.Descriptor{}, err
}
err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
if err != nil {
return distribution.Descriptor{}, err
}
return canonical, nil
}
// Rollback the blob upload process, releasing any resources associated with
// the writer and canceling the operation.
func (bw *blobWriter) Cancel(ctx context.Context) error {
context.GetLogger(ctx).Debug("(*blobWriter).Rollback")
if err := bw.removeResources(ctx); err != nil {
return err
}
bw.Close()
return nil
}
func (bw *blobWriter) Write(p []byte) (int, error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p)
bw.written += int64(n)
return n, err
}
func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash()))
bw.written += nn
return nn, err
}
func (bw *blobWriter) Close() error {
if bw.err != nil {
return bw.err
}
if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
return err
}
return bw.bufferedFileWriter.Close()
}
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
var (
verified, fullHash bool
canonical digest.Digest
)
if desc.Digest == "" {
// if no digest is provided, we have nothing to validate
// against. We don't really want to support this for the registry.
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Reason: fmt.Errorf("cannot validate against empty digest"),
}
}
// Stat the on disk file
if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): We really don't care if the file is
// not actually present for the reader. We now assume
// that the desc length is zero.
desc.Size = 0
default:
// Any other error we want propagated up the stack.
return distribution.Descriptor{}, err
}
} else {
if fi.IsDir() {
return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
}
bw.size = fi.Size()
}
if desc.Size > 0 {
if desc.Size != bw.size {
return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
}
} else {
// if provided 0 or negative length, we can assume caller doesn't know or
// care about length.
desc.Size = bw.size
}
// TODO(stevvooe): This section is very meandering. Need to be broken down
// to be a lot more clear.
if err := bw.resumeDigestAt(ctx, bw.size); err == nil {
canonical = bw.digester.Digest()
if canonical.Algorithm() == desc.Digest.Algorithm() {
// Common case: client and server prefer the same canonical digest
// algorithm - currently SHA256.
verified = desc.Digest == canonical
} else {
// The client wants to use a different digest algorithm. They'll just
// have to be patient and wait for us to download and re-hash the
// uploaded content using that digest algorithm.
fullHash = true
}
} else if err == errResumableDigestNotAvailable {
// Not using resumable digests, so we need to hash the entire layer.
fullHash = true
} else {
return distribution.Descriptor{}, err
}
if fullHash {
// a fantastic optimization: if the written data and the size are
// the same, we don't need to read the data from the backend. This is
// because we've written the entire file in the lifecycle of the
// current instance.
if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
canonical = bw.digester.Digest()
verified = desc.Digest == canonical
}
// If the check based on size fails, we fall back to the slowest of
// paths. We may be able to make the size-based check a stronger
// guarantee, so this may be defensive.
if !verified {
digester := digest.Canonical.New()
digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
if err != nil {
return distribution.Descriptor{}, err
}
// Read the file from the backend driver and validate it.
fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size)
if err != nil {
return distribution.Descriptor{}, err
}
defer fr.Close()
tr := io.TeeReader(fr, digester.Hash())
if _, err := io.Copy(digestVerifier, tr); err != nil {
return distribution.Descriptor{}, err
}
canonical = digester.Digest()
verified = digestVerifier.Verified()
}
}
if !verified {
context.GetLoggerWithFields(ctx,
map[interface{}]interface{}{
"canonical": canonical,
"provided": desc.Digest,
}, "canonical", "provided").
Errorf("canonical digest does match provided digest")
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Digest: desc.Digest,
Reason: fmt.Errorf("content does not match digest"),
}
}
// update desc with canonical hash
desc.Digest = canonical
if desc.MediaType == "" {
desc.MediaType = "application/octet-stream"
}
return desc, nil
}
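A minimal sketch of the fullHash fallback above, using the digest package's verifier: a single pass through a TeeReader both recomputes the canonical digest and checks the client-provided one.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/distribution/digest"
)

func main() {
	content := []byte("layer data")
	provided := digest.FromBytes(content) // what a client might claim

	digester := digest.Canonical.New()
	verifier, err := digest.NewDigestVerifier(provided)
	if err != nil {
		panic(err)
	}
	// One read both feeds the canonical digester and checks the claim.
	tr := io.TeeReader(bytes.NewReader(content), digester.Hash())
	if _, err := io.Copy(verifier, tr); err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest(), verifier.Verified()) // canonical digest, true
}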
// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
blobPath, err := pathFor(blobDataPathSpec{
digest: desc.Digest,
})
if err != nil {
return err
}
// Check for existence
if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // ensure that it doesn't exist.
default:
return err
}
} else {
// If the path exists, we can assume that the content has already
// been uploaded, since the blob storage is content-addressable.
// While it may be corrupted, detection of such corruption belongs
// elsewhere.
return nil
}
// If no data was received, we may not actually have a file on disk. Check
// the size here and write a zero-length file to blobPath if this is the
// case. For the most part, this should only ever happen with zero-length
// tars.
if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// HACK(stevvooe): This is slightly dangerous: if we verify above,
// get a hash, then the underlying file is deleted, we risk moving
// a zero-length blob into a nonzero-length blob location. To
// prevent this horrid thing, we employ the hack of only allowing
// this to happen for the digest of an empty tar.
if desc.Digest == digest.DigestSha256EmptyTar {
return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
}
// We let this fail during the move below.
logrus.
WithField("upload.id", bw.ID()).
WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
default:
return err // unrelated error
}
}
// TODO(stevvooe): We should also write the mediatype when executing this move.
return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}
// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
dataPath, err := pathFor(uploadDataPathSpec{
name: bw.blobStore.repository.Name().Name(),
id: bw.id,
})
if err != nil {
return err
}
// Resolve and delete the containing directory, which should include any
// upload related files.
dirPath := path.Dir(dataPath)
if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // already gone!
default:
// This should be uncommon enough such that returning an error
// should be okay. At this point, the upload should be mostly
// complete, but perhaps the backend became inaccessible.
context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
return err
}
}
return nil
}
func (bw *blobWriter) Reader() (io.ReadCloser, error) {
// todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
try := 1
for try <= 5 {
_, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path)
if err == nil {
break
}
switch err.(type) {
case storagedriver.PathNotFoundError:
context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try)
time.Sleep(1 * time.Second)
try++
default:
return nil, err
}
}
readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0)
if err != nil {
return nil, err
}
return readCloser, nil
}
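A minimal sketch of the exponential backoff the TODO above suggests (initial interval 0.5s, factor 2, 4 tries) in place of the fixed one-second sleep; the stat callback is a stand-in:

package main

import (
	"errors"
	"fmt"
	"time"
)

// statWithBackoff retries stat with exponentially growing sleeps:
// i=0.5, e=2, n=4.
func statWithBackoff(stat func() error) error {
	delay := 500 * time.Millisecond
	var err error
	for try := 1; try <= 4; try++ {
		if err = stat(); err == nil {
			return nil
		}
		fmt.Printf("nothing found on try %d, sleeping %v...\n", try, delay)
		time.Sleep(delay)
		delay *= 2
	}
	return err
}

func main() {
	err := statWithBackoff(func() error { return errors.New("not found") })
	fmt.Println(err)
}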

View File

@@ -1,17 +0,0 @@
// +build noresumabledigest
package storage
import (
"github.com/docker/distribution/context"
)
// resumeDigestAt is a noop when resumable digest support is disabled.
func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
return errResumableDigestNotAvailable
}
// storeHashState is a noop when resumable digest support is disabled.
func (bw *blobWriter) storeHashState(ctx context.Context) error {
return errResumableDigestNotAvailable
}

View File

@@ -1,178 +0,0 @@
// +build !noresumabledigest
package storage
import (
"fmt"
"io"
"os"
"path"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/stevvooe/resumable"
// register resumable hashes with import
_ "github.com/stevvooe/resumable/sha256"
_ "github.com/stevvooe/resumable/sha512"
)
// resumeDigestAt attempts to restore the state of the internal hash function
// by loading the most recent saved hash state less than or equal to the given
// offset. Any unhashed bytes remaining less than the given offset are hashed
// from the content uploaded so far.
func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
if offset < 0 {
return fmt.Errorf("cannot resume hash at negative offset: %d", offset)
}
h, ok := bw.digester.Hash().(resumable.Hash)
if !ok {
return errResumableDigestNotAvailable
}
if offset == int64(h.Len()) {
// State of digester is already at the requested offset.
return nil
}
// List hash states from storage backend.
var hashStateMatch hashStateEntry
hashStates, err := bw.getStoredHashStates(ctx)
if err != nil {
return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
}
// Find the highest stored hashState with offset less than or equal to
// the requested offset.
for _, hashState := range hashStates {
if hashState.offset == offset {
hashStateMatch = hashState
break // Found an exact offset match.
} else if hashState.offset < offset && hashState.offset > hashStateMatch.offset {
// This offset is closer to the requested offset.
hashStateMatch = hashState
} else if hashState.offset > offset {
// Remove any stored hash state with offsets higher than this one
// as writes to this resumed hasher will make those invalid. This
// is probably okay to skip for now since we don't expect anyone to
// use the API in this way. For that reason, we don't treat an
// error here as a fatal error, but only log it.
if err := bw.driver.Delete(ctx, hashState.path); err != nil {
logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
}
}
}
if hashStateMatch.offset == 0 {
// No need to load any state, just reset the hasher.
h.Reset()
} else {
storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
if err != nil {
return err
}
if err = h.Restore(storedState); err != nil {
return err
}
}
// Mind the gap.
if gapLen := offset - int64(h.Len()); gapLen > 0 {
// Need to read content from the upload to catch up to the desired offset.
fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size)
if err != nil {
return err
}
defer fr.Close()
if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil {
return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err)
}
if _, err := io.CopyN(h, fr, gapLen); err != nil {
return err
}
}
return nil
}
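The save/restore idea behind resumeDigestAt and storeHashState can be sketched with the standard library alone (Go 1.8+), since stdlib hashes implement encoding.BinaryMarshaler: snapshot the partial hash state, then resume from that offset in a fresh hasher.

package main

import (
	"crypto/sha256"
	"encoding"
	"fmt"
)

func main() {
	data := []byte("0123456789")

	// Hash the first five bytes and snapshot the state, which is what
	// storeHashState persists to the storage backend.
	h := sha256.New()
	h.Write(data[:5])
	state, _ := h.(encoding.BinaryMarshaler).MarshalBinary()

	// Later: restore the state into a fresh hasher and continue at offset 5.
	h2 := sha256.New()
	h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
	h2.Write(data[5:])

	fmt.Printf("%x\n", h2.Sum(nil))         // matches...
	fmt.Printf("%x\n", sha256.Sum256(data)) // ...the one-shot digest
}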
type hashStateEntry struct {
offset int64
path string
}
// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Name().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
list: true,
})
if err != nil {
return nil, err
}
paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
if err != nil {
if _, ok := err.(storagedriver.PathNotFoundError); !ok {
return nil, err
}
// Treat PathNotFoundError as no entries.
paths = nil
}
hashStateEntries := make([]hashStateEntry, 0, len(paths))
for _, p := range paths {
pathSuffix := path.Base(p)
// The suffix should be the offset.
offset, err := strconv.ParseInt(pathSuffix, 0, 64)
if err != nil {
logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
}
hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
}
return hashStateEntries, nil
}
func (bw *blobWriter) storeHashState(ctx context.Context) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
h, ok := bw.digester.Hash().(resumable.Hash)
if !ok {
return errResumableDigestNotAvailable
}
uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Name().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
offset: int64(h.Len()),
})
if err != nil {
return err
}
hashState, err := h.State()
if err != nil {
return err
}
return bw.driver.PutContent(ctx, uploadHashStatePath, hashState)
}

View File

@@ -1,13 +0,0 @@
package memory
import (
"testing"
"github.com/docker/distribution/registry/storage/cache/cachecheck"
)
// TestInMemoryBlobInfoCache checks the in memory implementation is working
// correctly.
func TestInMemoryBlobInfoCache(t *testing.T) {
cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider())
}

View File

@@ -1,66 +0,0 @@
package storage
import (
"errors"
"io"
"path"
"strings"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
)
// ErrFinishedWalk is used when the called walk function no longer wants
// to accept any more values. This is used for pagination when the
// required number of repos have been found.
var ErrFinishedWalk = errors.New("finished walk")
// Returns a list, or partial list, of repositories in the registry.
// Because it is quite an expensive operation, it should only be used when building up
// an initial set of repositories.
func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) {
var foundRepos []string
if len(repos) == 0 {
return 0, errors.New("no space in slice")
}
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return 0, err
}
err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
filePath := fileInfo.Path()
// lop the base path off
repoPath := filePath[len(root)+1:]
_, file := path.Split(repoPath)
if file == "_layers" {
repoPath = strings.TrimSuffix(repoPath, "/_layers")
if repoPath > last {
foundRepos = append(foundRepos, repoPath)
}
return ErrSkipDir
} else if strings.HasPrefix(file, "_") {
return ErrSkipDir
}
// if we've filled our array, no need to walk any further
if len(foundRepos) == len(repos) {
return ErrFinishedWalk
}
return nil
})
n = copy(repos, foundRepos)
// Signal that we have no more entries by setting EOF
if len(foundRepos) <= len(repos) && err != ErrFinishedWalk {
errVal = io.EOF
}
return n, errVal
}
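A minimal sketch of consuming this pagination, as the catalog tests below do: repeatedly pass the last repository seen and stop on io.EOF. It assumes reg is the distribution.Namespace returned by NewRegistry.

package catalog

import (
	"fmt"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// listAll walks the whole catalog one page at a time.
func listAll(ctx context.Context, reg distribution.Namespace, pageSize int) error {
	page := make([]string, pageSize)
	last := ""
	for {
		n, err := reg.Repositories(ctx, page, last)
		for _, name := range page[:n] {
			fmt.Println(name)
		}
		if err == io.EOF {
			return nil // catalog exhausted
		}
		if err != nil {
			return err
		}
		if n == 0 {
			return nil // defensive: avoid indexing an empty page
		}
		last = page[n-1] // resume after the last repository seen
	}
}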

View File

@@ -1,125 +0,0 @@
package storage
import (
"io"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
type setupEnv struct {
ctx context.Context
driver driver.StorageDriver
expected []string
registry distribution.Namespace
}
func setupFS(t *testing.T) *setupEnv {
d := inmemory.New()
c := []byte("")
ctx := context.Background()
registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
rootpath, _ := pathFor(repositoriesRootPathSpec{})
repos := []string{
"/foo/a/_layers/1",
"/foo/b/_layers/2",
"/bar/c/_layers/3",
"/bar/d/_layers/4",
"/foo/d/in/_layers/5",
"/an/invalid/repo",
"/bar/d/_layers/ignored/dir/6",
}
for _, repo := range repos {
if err := d.PutContent(ctx, rootpath+repo, c); err != nil {
t.Fatalf("Unable to put to inmemory fs")
}
}
expected := []string{
"bar/c",
"bar/d",
"foo/a",
"foo/b",
"foo/d/in",
}
return &setupEnv{
ctx: ctx,
driver: d,
expected: expected,
registry: registry,
}
}
func TestCatalog(t *testing.T) {
env := setupFS(t)
p := make([]string, 50)
numFilled, err := env.registry.Repositories(env.ctx, p, "")
if !testEq(p, env.expected, numFilled) {
t.Errorf("Expected catalog repos err")
}
if err != io.EOF {
t.Errorf("Catalog has more values which we aren't expecting")
}
}
func TestCatalogInParts(t *testing.T) {
env := setupFS(t)
chunkLen := 2
p := make([]string, chunkLen)
numFilled, err := env.registry.Repositories(env.ctx, p, "")
if err == io.EOF || numFilled != len(p) {
t.Errorf("Expected more values in catalog")
}
if !testEq(p, env.expected[0:chunkLen], numFilled) {
t.Errorf("Expected catalog first chunk err")
}
lastRepo := p[len(p)-1]
numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo)
if err == io.EOF || numFilled != len(p) {
t.Errorf("Expected more values in catalog")
}
if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) {
t.Errorf("Expected catalog second chunk err")
}
lastRepo = p[len(p)-1]
numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo)
if err != io.EOF {
t.Errorf("Catalog has more values which we aren't expecting")
}
if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) {
t.Errorf("Expected catalog third chunk err")
}
}
func testEq(a, b []string, size int) bool {
for cnt := 0; cnt < size; cnt++ {
if a[cnt] != b[cnt] {
return false
}
}
return true
}

View File

@@ -1,3 +0,0 @@
// Package storage contains storage services for use in the registry
// application. It should be considered an internal package, as of Go 1.4.
package storage

View File

@@ -1,177 +0,0 @@
package storage
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
// TODO(stevvooe): Set an optimal buffer size here. We'll have to
// understand the latency characteristics of the underlying network to
// set this correctly, so we may want to leave it to the driver. For
// out of process drivers, we'll have to optimize this buffer size for
// local communication.
const fileReaderBufferSize = 4 << 20
// fileReader provides a read seeker interface to files stored in a
// storage driver. It is used to implement part of the layer interface and
// the read side of LayerUpload.
type fileReader struct {
driver storagedriver.StorageDriver
ctx context.Context
// identifying fields
path string
size int64 // size is the total size, must be set.
// mutable fields
rc io.ReadCloser // remote read closer
brd *bufio.Reader // internal buffered io
offset int64 // offset is the current read offset
err error // terminal error, if set, reader is closed
}
// newFileReader initializes a file reader for the remote file. The reader
// takes on the size and path that must be determined externally with a stat
// call. The reader operates optimistically, assuming that the file is already
// there.
func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) {
return &fileReader{
ctx: ctx,
driver: driver,
path: path,
size: size,
}, nil
}
func (fr *fileReader) Read(p []byte) (n int, err error) {
if fr.err != nil {
return 0, fr.err
}
rd, err := fr.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
fr.offset += int64(n)
// Simulate an io.EOF error if we reach the file size.
if err == nil && fr.offset >= fr.size {
err = io.EOF
}
return n, err
}
func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
if fr.err != nil {
return 0, fr.err
}
var err error
newOffset := fr.offset
switch whence {
case os.SEEK_CUR:
newOffset += int64(offset)
case os.SEEK_END:
newOffset = fr.size + int64(offset)
case os.SEEK_SET:
newOffset = int64(offset)
}
if newOffset < 0 {
err = fmt.Errorf("cannot seek to negative position")
} else {
if fr.offset != newOffset {
fr.reset()
}
// No problems, set the offset.
fr.offset = newOffset
}
return fr.offset, err
}
func (fr *fileReader) Close() error {
return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
}
// reader prepares the current reader at the fileReader's offset, ensuring
// it is buffered and ready to go.
func (fr *fileReader) reader() (io.Reader, error) {
if fr.err != nil {
return nil, fr.err
}
if fr.rc != nil {
return fr.brd, nil
}
// If we don't have a reader, open one up.
rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): If the path is not found, we simply return a
// reader that returns io.EOF. However, we do not set fr.rc,
// allowing future attempts at getting a reader to possibly
// succeed if the file turns up later.
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
default:
return nil, err
}
}
fr.rc = rc
if fr.brd == nil {
fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
} else {
fr.brd.Reset(fr.rc)
}
return fr.brd, nil
}
// reset resets the reader, forcing the read method to open up a new
// connection and rebuild the buffered reader. This should be called when the
// offset and the reader will become out of sync, such as during a seek
// operation.
func (fr *fileReader) reset() {
if fr.err != nil {
return
}
if fr.rc != nil {
fr.rc.Close()
fr.rc = nil
}
}
func (fr *fileReader) closeWithErr(err error) error {
if fr.err != nil {
return fr.err
}
fr.err = err
// close and release reader chain
if fr.rc != nil {
fr.rc.Close()
}
fr.rc = nil
fr.brd = nil
return fr.err
}
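A standalone sketch of the optimistic pattern in reader(): when the backing file is missing, hand back an empty reader so the caller simply observes io.EOF and can retry later.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// open returns an empty reader when the file does not exist yet, mirroring
// the optimistic branch in fileReader.reader.
func open(files map[string][]byte, path string) (io.ReadCloser, error) {
	data, ok := files[path]
	if !ok {
		// Missing file: a reader that immediately returns io.EOF.
		return ioutil.NopCloser(bytes.NewReader(nil)), nil
	}
	return ioutil.NopCloser(bytes.NewReader(data)), nil
}

func main() {
	rc, _ := open(map[string][]byte{}, "/doesnotexist")
	n, err := rc.Read(make([]byte, 8))
	fmt.Println(n, err) // 0 EOF
}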

View File

@@ -1,199 +0,0 @@
package storage
import (
"bytes"
"crypto/rand"
"io"
mrand "math/rand"
"os"
"testing"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
func TestSimpleRead(t *testing.T) {
ctx := context.Background()
content := make([]byte, 1<<20)
n, err := rand.Read(content)
if err != nil {
t.Fatalf("unexpected error building random data: %v", err)
}
if n != len(content) {
t.Fatalf("random read didn't fill buffer")
}
dgst, err := digest.FromReader(bytes.NewReader(content))
if err != nil {
t.Fatalf("unexpected error digesting random content: %v", err)
}
driver := inmemory.New()
path := "/random"
if err := driver.PutContent(ctx, path, content); err != nil {
t.Fatalf("error putting patterned content: %v", err)
}
fr, err := newFileReader(ctx, driver, path, int64(len(content)))
if err != nil {
t.Fatalf("error allocating file reader: %v", err)
}
verifier, err := digest.NewDigestVerifier(dgst)
if err != nil {
t.Fatalf("error getting digest verifier: %s", err)
}
io.Copy(verifier, fr)
if !verifier.Verified() {
t.Fatalf("unable to verify read data")
}
}
func TestFileReaderSeek(t *testing.T) {
driver := inmemory.New()
pattern := "01234567890ab" // prime length block
repetitions := 1024
path := "/patterned"
content := bytes.Repeat([]byte(pattern), repetitions)
ctx := context.Background()
if err := driver.PutContent(ctx, path, content); err != nil {
t.Fatalf("error putting patterned content: %v", err)
}
fr, err := newFileReader(ctx, driver, path, int64(len(content)))
if err != nil {
t.Fatalf("unexpected error creating file reader: %v", err)
}
// Seek all over the place, in blocks of pattern size and make sure we get
// the right data.
for _, repetition := range mrand.Perm(repetitions - 1) {
targetOffset := int64(len(pattern) * repetition)
// Seek to a multiple of pattern size and read pattern size bytes
offset, err := fr.Seek(targetOffset, os.SEEK_SET)
if err != nil {
t.Fatalf("unexpected error seeking: %v", err)
}
if offset != targetOffset {
t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset)
}
p := make([]byte, len(pattern))
n, err := fr.Read(p)
if err != nil {
t.Fatalf("error reading pattern: %v", err)
}
if n != len(pattern) {
t.Fatalf("incorrect read length: %d != %d", n, len(pattern))
}
if string(p) != pattern {
t.Fatalf("incorrect read content: %q != %q", p, pattern)
}
// Check offset
current, err := fr.Seek(0, os.SEEK_CUR)
if err != nil {
t.Fatalf("error checking current offset: %v", err)
}
if current != targetOffset+int64(len(pattern)) {
t.Fatalf("unexpected offset after read: %v", err)
}
}
start, err := fr.Seek(0, os.SEEK_SET)
if err != nil {
t.Fatalf("error seeking to start: %v", err)
}
if start != 0 {
t.Fatalf("expected to seek to start: %v != 0", start)
}
end, err := fr.Seek(0, os.SEEK_END)
if err != nil {
t.Fatalf("error checking current offset: %v", err)
}
if end != int64(len(content)) {
t.Fatalf("expected to seek to end: %v != %v", end, len(content))
}
// 4. Seek before start, ensure error.
// seek before start
before, err := fr.Seek(-1, os.SEEK_SET)
if err == nil {
t.Fatalf("error expected, returned offset=%v", before)
}
// 5. Seek after end, ensure no error.
after, err := fr.Seek(1, os.SEEK_END)
if err != nil {
t.Fatalf("unexpected error seeking past end: %v (offset=%v)", err, after)
}
p := make([]byte, 16)
n, err := fr.Read(p)
if n != 0 {
t.Fatalf("bytes reads %d != %d", n, 0)
}
if err != io.EOF {
t.Fatalf("expected io.EOF, got %v", err)
}
}
// TestFileReaderNonExistentFile ensures the reader behaves as expected with a
// missing or zero-length remote file. While the file may not exist, the
// reader should not error out on creation and should return 0-bytes from the
// read method, with an io.EOF error.
func TestFileReaderNonExistentFile(t *testing.T) {
driver := inmemory.New()
fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10)
if err != nil {
t.Fatalf("unexpected error initializing reader: %v", err)
}
var buf [1024]byte
n, err := fr.Read(buf[:])
if n != 0 {
t.Fatalf("non-zero byte read reported: %d != 0", n)
}
if err != io.EOF {
t.Fatalf("read on missing file should return io.EOF, got %v", err)
}
}
// TestFileReaderErrors covers the various error return types for different
// conditions that can arise when reading a layer.
func TestFileReaderErrors(t *testing.T) {
// TODO(stevvooe): We need to cover error return types, driven by the
// errors returned via the HTTP API. For now, here is an incomplete list:
//
// 1. Layer Not Found: returned when layer is not found or access is
// denied.
// 2. Layer Unavailable: returned when link references are unresolved,
// but layer is known to the registry.
// 3. Layer Invalid: This may be split into more errors, but should be
// returned when the name or tarsum does not reference a valid blob. We
// may also need something to communicate layer verification errors
// for the inline tarsum check.
// 4. Timeout: timeouts to backend. Need to better understand these
// failure cases and how the storage driver propagates these errors
// up the stack.
}

View File

@@ -1,180 +0,0 @@
package storage
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
const (
fileWriterBufferSize = 5 << 20
)
// fileWriter implements a remote file writer backed by a storage driver.
type fileWriter struct {
driver storagedriver.StorageDriver
ctx context.Context
// identifying fields
path string
// mutable fields
size int64 // size of the file, aka the current end
offset int64 // offset is the current write offset
err error // terminal error, if set, reader is closed
}
type bufferedFileWriter struct {
fileWriter
bw *bufio.Writer
}
// fileWriterInterface describes the io-compliant interface that the
// fileWriter should implement.
type fileWriterInterface interface {
io.WriteSeeker
io.ReaderFrom
io.Closer
}
var _ fileWriterInterface = &fileWriter{}
// newFileWriter returns a prepared fileWriter for the driver and path. This
// could be considered similar to an "open" call on a regular filesystem.
func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) {
fw := fileWriter{
driver: driver,
path: path,
ctx: ctx,
}
if fi, err := driver.Stat(ctx, path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// ignore, offset is zero
default:
return nil, err
}
} else {
if fi.IsDir() {
return nil, fmt.Errorf("cannot write to a directory")
}
fw.size = fi.Size()
}
buffered := bufferedFileWriter{
fileWriter: fw,
}
buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize)
return &buffered, nil
}
// wraps the fileWriter.Write method to buffer small writes
func (bfw *bufferedFileWriter) Write(p []byte) (int, error) {
return bfw.bw.Write(p)
}
// wraps fileWriter.Close to ensure the buffer is flushed
// before we close the writer.
func (bfw *bufferedFileWriter) Close() (err error) {
if err = bfw.Flush(); err != nil {
return err
}
err = bfw.fileWriter.Close()
return err
}
// wraps fileWriter.Seek to ensure offset is handled
// correctly in respect to pending data in the buffer
func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) {
if err := bfw.Flush(); err != nil {
return 0, err
}
return bfw.fileWriter.Seek(offset, whence)
}
// wraps bufio.Writer.Flush to allow intermediate flushes
// of the bufferedFileWriter
func (bfw *bufferedFileWriter) Flush() error {
return bfw.bw.Flush()
}
// Write writes the buffer p at the current write offset.
func (fw *fileWriter) Write(p []byte) (n int, err error) {
nn, err := fw.ReadFrom(bytes.NewReader(p))
return int(nn), err
}
// ReadFrom reads reader r until io.EOF writing the contents at the current
// offset.
func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) {
if fw.err != nil {
return 0, fw.err
}
nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r)
// We should forward the offset, whether or not there was an error.
// Basically, we keep the filewriter in sync with the reader's head. If an
// error is encountered, the whole thing should be retried but we proceed
// from an expected offset, even if the data didn't make it to the
// backend.
fw.offset += nn
if fw.offset > fw.size {
fw.size = fw.offset
}
return nn, err
}
// Seek moves the write position to the requested offset based on the whence
// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET.
func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) {
if fw.err != nil {
return 0, fw.err
}
var err error
newOffset := fw.offset
switch whence {
case os.SEEK_CUR:
newOffset += int64(offset)
case os.SEEK_END:
newOffset = fw.size + int64(offset)
case os.SEEK_SET:
newOffset = int64(offset)
}
if newOffset < 0 {
err = fmt.Errorf("cannot seek to negative position")
} else {
// No problems, set the offset.
fw.offset = newOffset
}
return fw.offset, err
}
// Close closes the fileWriter for writing.
// Calling it once is valid and returns a nil error. Subsequent calls
// detect that fw.err has been set and return that error.
func (fw *fileWriter) Close() error {
if fw.err != nil {
return fw.err
}
fw.err = fmt.Errorf("filewriter@%v: closed", fw.path)
return nil
}
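// What follows is an illustrative sketch, not part of the upstream file,
// showing the typical call sequence against any storage driver: writes are
// buffered, Seek flushes pending bytes before repositioning, and Close
// flushes before marking the writer terminal. The path is a placeholder.
func exampleFileWriterUsage(ctx context.Context, d storagedriver.StorageDriver) error {
fw, err := newFileWriter(ctx, d, "/example") // "/example" is illustrative
if err != nil {
return err
}
defer fw.Close() // flushes the buffer before closing
if _, err := fw.Write([]byte("hello")); err != nil {
return err // data may still sit in the bufio buffer at this point
}
// Seek flushes, so the returned offset reflects all writes so far.
end, err := fw.Seek(0, os.SEEK_END)
if err != nil {
return err
}
_ = end // end now equals the file size, including the write above
return nil
}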

View File

@@ -1,262 +0,0 @@
package storage
import (
"bytes"
"crypto/rand"
"io"
"os"
"testing"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
// TestSimpleWrite takes the fileWriter through common write operations
// ensuring data integrity.
func TestSimpleWrite(t *testing.T) {
content := make([]byte, 1<<20)
n, err := rand.Read(content)
if err != nil {
t.Fatalf("unexpected error building random data: %v", err)
}
if n != len(content) {
t.Fatalf("random read did't fill buffer")
}
dgst, err := digest.FromReader(bytes.NewReader(content))
if err != nil {
t.Fatalf("unexpected error digesting random content: %v", err)
}
driver := inmemory.New()
path := "/random"
ctx := context.Background()
fw, err := newFileWriter(ctx, driver, path)
if err != nil {
t.Fatalf("unexpected error creating fileWriter: %v", err)
}
defer fw.Close()
n, err = fw.Write(content)
if err != nil {
t.Fatalf("unexpected error writing content: %v", err)
}
fw.Flush()
if n != len(content) {
t.Fatalf("unexpected write length: %d != %d", n, len(content))
}
fr, err := newFileReader(ctx, driver, path, int64(len(content)))
if err != nil {
t.Fatalf("unexpected error creating fileReader: %v", err)
}
defer fr.Close()
verifier, err := digest.NewDigestVerifier(dgst)
if err != nil {
t.Fatalf("unexpected error getting digest verifier: %s", err)
}
io.Copy(verifier, fr)
if !verifier.Verified() {
t.Fatalf("unable to verify write data")
}
// Check the seek position is equal to the content length
end, err := fw.Seek(0, os.SEEK_END)
if err != nil {
t.Fatalf("unexpected error seeking: %v", err)
}
if end != int64(len(content)) {
t.Fatalf("write did not advance offset: %d != %d", end, len(content))
}
// Double the content
doubled := append(content, content...)
doubledgst, err := digest.FromReader(bytes.NewReader(doubled))
if err != nil {
t.Fatalf("unexpected error digesting doubled content: %v", err)
}
nn, err := fw.ReadFrom(bytes.NewReader(content))
if err != nil {
t.Fatalf("unexpected error doubling content: %v", err)
}
if nn != int64(len(content)) {
t.Fatalf("writeat was short: %d != %d", n, len(content))
}
fr, err = newFileReader(ctx, driver, path, int64(len(doubled)))
if err != nil {
t.Fatalf("unexpected error creating fileReader: %v", err)
}
defer fr.Close()
verifier, err = digest.NewDigestVerifier(doubledgst)
if err != nil {
t.Fatalf("unexpected error getting digest verifier: %s", err)
}
io.Copy(verifier, fr)
if !verifier.Verified() {
t.Fatalf("unable to verify write data")
}
// Check that Write updated the offset.
end, err = fw.Seek(0, os.SEEK_END)
if err != nil {
t.Fatalf("unexpected error seeking: %v", err)
}
if end != int64(len(doubled)) {
t.Fatalf("write did not advance offset: %d != %d", end, len(doubled))
}
// Now, copy from one path to another, piping the data from the fileReader
// through the fileWriter rather than using the driver.Move command, to
// ensure everything is working correctly.
fr, err = newFileReader(ctx, driver, path, int64(len(doubled)))
if err != nil {
t.Fatalf("unexpected error creating fileReader: %v", err)
}
defer fr.Close()
fw, err = newFileWriter(ctx, driver, "/copied")
if err != nil {
t.Fatalf("unexpected error creating fileWriter: %v", err)
}
defer fw.Close()
nn, err = io.Copy(fw, fr)
if err != nil {
t.Fatalf("unexpected error copying data: %v", err)
}
if nn != int64(len(doubled)) {
t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled))
}
fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled)))
if err != nil {
t.Fatalf("unexpected error creating fileReader: %v", err)
}
defer fr.Close()
verifier, err = digest.NewDigestVerifier(doubledgst)
if err != nil {
t.Fatalf("unexpected error getting digest verifier: %s", err)
}
io.Copy(verifier, fr)
if !verifier.Verified() {
t.Fatalf("unable to verify write data")
}
}
func TestBufferedFileWriter(t *testing.T) {
ctx := context.Background()
writer, err := newFileWriter(ctx, inmemory.New(), "/random")
if err != nil {
t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error())
}
// write one byte and ensure the offset hasn't been incremented.
// offset will only get incremented when the buffer gets flushed
short := []byte{byte(1)}
writer.Write(short)
if writer.offset > 0 {
t.Fatalf("WriteStream called prematurely")
}
// write enough data to cause the buffer to flush and confirm
// the offset has been incremented
long := make([]byte, fileWriterBufferSize)
_, err = rand.Read(long)
if err != nil {
t.Fatalf("unexpected error building random data: %v", err)
}
for i := range long {
long[i] = byte(i)
}
writer.Write(long)
writer.Close()
if writer.offset != (fileWriterBufferSize + 1) {
t.Fatalf("WriteStream not called when buffer capacity reached")
}
}
func BenchmarkFileWriter(b *testing.B) {
b.StopTimer() // not sure how long setup above will take
for i := 0; i < b.N; i++ {
// Start basic fileWriter initialization
fw := fileWriter{
driver: inmemory.New(),
path: "/random",
}
ctx := context.Background()
if fi, err := fw.driver.Stat(ctx, fw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// ignore, offset is zero
default:
b.Fatalf("Failed to initialize fileWriter: %v", err.Error())
}
} else {
if fi.IsDir() {
b.Fatalf("Cannot write to a directory")
}
fw.size = fi.Size()
}
randomBytes := make([]byte, 1<<20)
_, err := rand.Read(randomBytes)
if err != nil {
b.Fatalf("unexpected error building random data: %v", err)
}
// End basic file writer initialization
b.StartTimer()
for j := 0; j < 100; j++ {
fw.Write(randomBytes)
}
b.StopTimer()
}
}
func BenchmarkBufferedFileWriter(b *testing.B) {
b.StopTimer() // not sure how long setup above will take
ctx := context.Background()
for i := 0; i < b.N; i++ {
bfw, err := newFileWriter(ctx, inmemory.New(), "/random")
if err != nil {
b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error())
}
randomBytes := make([]byte, 1<<20)
_, err = rand.Read(randomBytes)
if err != nil {
b.Fatalf("unexpected error building random data: %v", err)
}
b.StartTimer()
for j := 0; j < 100; j++ {
bfw.Write(randomBytes)
}
b.StopTimer()
}
}

View File

@@ -1,415 +0,0 @@
package storage
import (
"fmt"
"net/http"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/uuid"
)
// linkPathFunc describes a function that can resolve a link based on the
// repository name and digest.
type linkPathFunc func(name string, dgst digest.Digest) (string, error)
// linkedBlobStore provides a full BlobService that namespaces the blobs to a
// given repository. Effectively, it manages the links in a given repository
// that grant access to the global blob store.
type linkedBlobStore struct {
*blobStore
registry *registry
blobServer distribution.BlobServer
blobAccessController distribution.BlobDescriptorService
repository distribution.Repository
ctx context.Context // only to be used where context can't come through method args
deleteEnabled bool
resumableDigestEnabled bool
// linkPathFns specifies one or more path functions allowing one to
// control the repository blob link set to which the blob store
// dispatches. This is required because manifest and layer blobs have not
// yet been fully merged. At some point, this functionality should be
// removed and the blob links folder should be merged. The first entry is
// treated as the "canonical" link location and will be used for writes.
linkPathFns []linkPathFunc
}
var _ distribution.BlobStore = &linkedBlobStore{}
func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return lbs.blobAccessController.Stat(ctx, dgst)
}
func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return nil, err
}
return lbs.blobStore.Get(ctx, canonical.Digest)
}
func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return nil, err
}
return lbs.blobStore.Open(ctx, canonical.Digest)
}
func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return err
}
if canonical.MediaType != "" {
// Set the repository local content type.
w.Header().Set("Content-Type", canonical.MediaType)
}
return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest)
}
func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
dgst := digest.FromBytes(p)
// Place the data in the blob store first.
desc, err := lbs.blobStore.Put(ctx, mediaType, p)
if err != nil {
context.GetLogger(ctx).Errorf("error putting into main store: %v", err)
return distribution.Descriptor{}, err
}
if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil {
return distribution.Descriptor{}, err
}
// TODO(stevvooe): Write out mediatype if incoming differs from what is
// returned by Put above. Note that we should allow updates for a given
// repository.
return desc, lbs.linkBlob(ctx, desc)
}
// createOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type createOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
}
}
type optionFunc func(interface{}) error
func (f optionFunc) Apply(v interface{}) error {
return f(v)
}
// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
return optionFunc(func(v interface{}) error {
opts, ok := v.(*createOptions)
if !ok {
return fmt.Errorf("unexpected options type: %T", v)
}
opts.Mount.ShouldMount = true
opts.Mount.From = ref
return nil
})
}
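// Illustrative sketch, not part of the upstream file: a caller that
// attempts a cross-repository mount and falls back to a normal upload
// session when the mount is not possible. ErrBlobMounted signals that the
// blob was linked without an upload.
func exampleCreateWithMount(ctx context.Context, blobs distribution.BlobStore, ref reference.Canonical) (distribution.BlobWriter, error) {
w, err := blobs.Create(ctx, WithMountFrom(ref))
if ebm, ok := err.(distribution.ErrBlobMounted); ok {
// Mounted from ref's repository; ebm.Descriptor describes the
// blob and no writer or upload session is needed.
_ = ebm.Descriptor
return nil, nil
}
return w, err
}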
// Create begins a blob write session, returning a handle.
func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer")
var opts createOptions
for _, option := range options {
err := option.Apply(&opts)
if err != nil {
return nil, err
}
}
if opts.Mount.ShouldMount {
desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest())
if err == nil {
// Mount successful, no need to initiate an upload session
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
}
}
uuid := uuid.Generate().String()
startedAt := time.Now().UTC()
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Name().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Name().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
// Write a startedat file for this upload
if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
return nil, err
}
return lbs.newBlobUpload(ctx, uuid, path, startedAt)
}
func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume")
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Name().Name(),
id: id,
})
if err != nil {
return nil, err
}
startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return nil, distribution.ErrBlobUploadUnknown
default:
return nil, err
}
}
startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
if err != nil {
return nil, err
}
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Name().Name(),
id: id,
})
if err != nil {
return nil, err
}
return lbs.newBlobUpload(ctx, id, path, startedAt)
}
func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
if !lbs.deleteEnabled {
return distribution.ErrUnsupported
}
// Ensure the blob is available for deletion
_, err := lbs.blobAccessController.Stat(ctx, dgst)
if err != nil {
return err
}
err = lbs.blobAccessController.Clear(ctx, dgst)
if err != nil {
return err
}
return nil
}
func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) {
repo, err := lbs.registry.Repository(ctx, sourceRepo)
if err != nil {
return distribution.Descriptor{}, err
}
stat, err := repo.Blobs(ctx).Stat(ctx, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
desc := distribution.Descriptor{
Size: stat.Size,
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}
return desc, lbs.linkBlob(ctx, desc)
}
// newBlobUpload allocates a new upload controller with the given state.
func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) {
fw, err := newFileWriter(ctx, lbs.driver, path)
if err != nil {
return nil, err
}
bw := &blobWriter{
blobStore: lbs,
id: uuid,
startedAt: startedAt,
digester: digest.Canonical.New(),
bufferedFileWriter: *fw,
resumableDigestEnabled: lbs.resumableDigestEnabled,
}
return bw, nil
}
// linkBlob links a valid, written blob into the registry under the named
// repository for the upload controller.
func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error {
dgsts := append([]digest.Digest{canonical.Digest}, aliases...)
// TODO(stevvooe): Need to write out mediatype for only canonical hash
// since we don't care about the aliases. They are generally unused except
// for tarsum but those versions don't care about mediatype.
// Don't make duplicate links.
seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
// only use the first link
linkPathFn := lbs.linkPathFns[0]
for _, dgst := range dgsts {
if _, seen := seenDigests[dgst]; seen {
continue
}
seenDigests[dgst] = struct{}{}
blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst)
if err != nil {
return err
}
if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil {
return err
}
}
return nil
}
type linkedBlobStatter struct {
*blobStore
repository distribution.Repository
// linkPathFns specifies one or more path functions allowing one to
// control the repository blob link set to which the blob store
// dispatches. This is required because manifest and layer blobs have not
// yet been fully merged. At some point, this functionality should be
// removed and the blob links folder should be merged. The first entry is
// treated as the "canonical" link location and will be used for writes.
linkPathFns []linkPathFunc
}
var _ distribution.BlobDescriptorService = &linkedBlobStatter{}
func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
var (
resolveErr error
target digest.Digest
)
// try the many link path functions until we get success or an error that
// is not PathNotFoundError.
for _, linkPathFn := range lbs.linkPathFns {
var err error
target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)
if err == nil {
break // success!
}
switch err := err.(type) {
case driver.PathNotFoundError:
resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error
default:
return distribution.Descriptor{}, err
}
}
if resolveErr != nil {
return distribution.Descriptor{}, resolveErr
}
if target != dgst {
// Track when we are doing cross-digest domain lookups. ie, sha512 to sha256.
context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
}
// TODO(stevvooe): Look up repository local mediatype and replace that on
// the returned descriptor.
return lbs.blobStore.statter.Stat(ctx, target)
}
func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
// clear any possible existence of a link described in linkPathFns
for _, linkPathFn := range lbs.linkPathFns {
blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst)
if err != nil {
return err
}
err = lbs.blobStore.driver.Delete(ctx, blobLinkPath)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
continue // just ignore this error and continue
default:
return err
}
}
}
return nil
}
// resolveWithLinkFunc allows us to read a link to a resource with different
// linkPathFuncs to let us try a few different paths before returning not
// found.
func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) {
blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst)
if err != nil {
return "", err
}
return lbs.blobStore.readlink(ctx, blobLinkPath)
}
func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
// The canonical descriptor for a blob is set at the commit phase of upload
return nil
}
// blobLinkPath provides the path to the blob link, also known as layers.
func blobLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(layerLinkPathSpec{name: name, digest: dgst})
}
// manifestRevisionLinkPath provides the path to the manifest revision link.
func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst})
}

View File

@@ -1,96 +0,0 @@
package storage
import (
"fmt"
"encoding/json"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/manifestlist"
)
// manifestListHandler is a ManifestHandler that covers schema2 manifest lists.
type manifestListHandler struct {
repository *repository
blobStore *linkedBlobStore
ctx context.Context
}
var _ ManifestHandler = &manifestListHandler{}
func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal")
var m manifestlist.DeserializedManifestList
if err := json.Unmarshal(content, &m); err != nil {
return nil, err
}
return &m, nil
}
func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put")
m, ok := manifestList.(*manifestlist.DeserializedManifestList)
if !ok {
return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList)
}
if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
return "", err
}
mt, payload, err := m.Payload()
if err != nil {
return "", err
}
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
// Link the revision into the repository.
if err := ms.blobStore.linkBlob(ctx, revision); err != nil {
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to
// store valid content, leaving trust policies of that content up to
// consumers.
func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if !skipDependencyVerification {
// This manifest service is different from the blob service
// returned by Blob. It uses a linked blob store to ensure that
// only manifests are accessible.
manifestService, err := ms.repository.Manifests(ctx)
if err != nil {
return err
}
for _, manifestDescriptor := range mnfst.References() {
exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest)
if err != nil && err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
if err != nil || !exists {
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest})
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}

View File

@@ -1,134 +0,0 @@
package storage
import (
"fmt"
"encoding/json"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
)
// A ManifestHandler gets and puts manifests of a particular type.
type ManifestHandler interface {
// Unmarshal unmarshals the manifest from a byte slice.
Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error)
// Put creates or updates the given manifest returning the manifest digest.
Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error)
}
// SkipLayerVerification allows a manifest to be Put before its
// layers are on the filesystem
func SkipLayerVerification() distribution.ManifestServiceOption {
return skipLayerOption{}
}
type skipLayerOption struct{}
func (o skipLayerOption) Apply(m distribution.ManifestService) error {
if ms, ok := m.(*manifestStore); ok {
ms.skipDependencyVerification = true
return nil
}
return fmt.Errorf("skip layer verification only valid for manifestStore")
}
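// Illustrative sketch, not part of the upstream file: requesting a manifest
// service that skips dependency verification, as a migration or import tool
// might. Assumes Repository.Manifests accepts ManifestServiceOption values,
// as implied by the Apply method above.
func exampleSkipVerification(ctx context.Context, repo distribution.Repository) (distribution.ManifestService, error) {
return repo.Manifests(ctx, SkipLayerVerification())
}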
type manifestStore struct {
repository *repository
blobStore *linkedBlobStore
ctx context.Context
skipDependencyVerification bool
schema1Handler ManifestHandler
schema2Handler ManifestHandler
manifestListHandler ManifestHandler
}
var _ distribution.ManifestService = &manifestStore{}
func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists")
_, err := ms.blobStore.Stat(ms.ctx, dgst)
if err != nil {
if err == distribution.ErrBlobUnknown {
return false, nil
}
return false, err
}
return true, nil
}
func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Get")
// TODO(stevvooe): Need to check descriptor from above to ensure that the
// mediatype is as we expect for the manifest store.
content, err := ms.blobStore.Get(ctx, dgst)
if err != nil {
if err == distribution.ErrBlobUnknown {
return nil, distribution.ErrManifestUnknownRevision{
Name: ms.repository.Name().Name(),
Revision: dgst,
}
}
return nil, err
}
var versioned manifest.Versioned
if err = json.Unmarshal(content, &versioned); err != nil {
return nil, err
}
switch versioned.SchemaVersion {
case 1:
return ms.schema1Handler.Unmarshal(ctx, dgst, content)
case 2:
// This can be an image manifest or a manifest list
switch versioned.MediaType {
case schema2.MediaTypeManifest:
return ms.schema2Handler.Unmarshal(ctx, dgst, content)
case manifestlist.MediaTypeManifestList:
return ms.manifestListHandler.Unmarshal(ctx, dgst, content)
default:
return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)}
}
}
return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion)
}
func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Put")
switch manifest.(type) {
case *schema1.SignedManifest:
return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification)
case *schema2.DeserializedManifest:
return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification)
case *manifestlist.DeserializedManifestList:
return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification)
}
return "", fmt.Errorf("unrecognized manifest type %T", manifest)
}
// Delete removes the revision of the specified manifest.
func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete")
return ms.blobStore.Delete(ctx, dgst)
}
func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
return 0, distribution.ErrUnsupported
}

View File

@@ -1,401 +0,0 @@
package storage
import (
"bytes"
"io"
"reflect"
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
"github.com/docker/libtrust"
)
type manifestStoreTestEnv struct {
ctx context.Context
driver driver.StorageDriver
registry distribution.Namespace
repository distribution.Repository
name reference.Named
tag string
}
func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string) *manifestStoreTestEnv {
ctx := context.Background()
driver := inmemory.New()
registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(
memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repo, err := registry.Repository(ctx, name)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
return &manifestStoreTestEnv{
ctx: ctx,
driver: driver,
registry: registry,
repository: repo,
name: name,
tag: tag,
}
}
func TestManifestStorage(t *testing.T) {
repoName, _ := reference.ParseNamed("foo/bar")
env := newManifestStoreTestEnv(t, repoName, "thetag")
ctx := context.Background()
ms, err := env.repository.Manifests(ctx)
if err != nil {
t.Fatal(err)
}
m := schema1.Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: env.name.Name(),
Tag: env.tag,
}
// Build up some test layers and add them to the manifest, saving the
// readseekers for upload later.
testLayers := map[digest.Digest]io.ReadSeeker{}
for i := 0; i < 2; i++ {
rs, ds, err := testutil.CreateRandomTarFile()
if err != nil {
t.Fatalf("unexpected error generating test layer file")
}
dgst := digest.Digest(ds)
testLayers[digest.Digest(dgst)] = rs
m.FSLayers = append(m.FSLayers, schema1.FSLayer{
BlobSum: dgst,
})
m.History = append(m.History, schema1.History{
V1Compatibility: "",
})
}
pk, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
t.Fatalf("unexpected error generating private key: %v", err)
}
sm, merr := schema1.Sign(&m, pk)
if merr != nil {
t.Fatalf("error signing manifest: %v", err)
}
_, err = ms.Put(ctx, sm)
if err == nil {
t.Fatalf("expected errors putting manifest with full verification")
}
switch err := err.(type) {
case distribution.ErrManifestVerification:
if len(err) != 2 {
t.Fatalf("expected 2 verification errors: %#v", err)
}
for _, err := range err {
if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok {
t.Fatalf("unexpected error type: %v", err)
}
}
default:
t.Fatalf("unexpected error verifying manifest: %v", err)
}
// Now, upload the layers that were missing!
for dgst, rs := range testLayers {
wr, err := env.repository.Blobs(env.ctx).Create(env.ctx)
if err != nil {
t.Fatalf("unexpected error creating test upload: %v", err)
}
if _, err := io.Copy(wr, rs); err != nil {
t.Fatalf("unexpected error copying to upload: %v", err)
}
if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil {
t.Fatalf("unexpected error finishing upload: %v", err)
}
}
var manifestDigest digest.Digest
if manifestDigest, err = ms.Put(ctx, sm); err != nil {
t.Fatalf("unexpected error putting manifest: %v", err)
}
exists, err := ms.Exists(ctx, manifestDigest)
if err != nil {
t.Fatalf("unexpected error checking manifest existence: %#v", err)
}
if !exists {
t.Fatalf("manifest should exist")
}
fromStore, err := ms.Get(ctx, manifestDigest)
if err != nil {
t.Fatalf("unexpected error fetching manifest: %v", err)
}
fetchedManifest, ok := fromStore.(*schema1.SignedManifest)
if !ok {
t.Fatalf("unexpected manifest type from signedstore")
}
if !reflect.DeepEqual(fetchedManifest, sm) {
t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm)
}
_, pl, err := fetchedManifest.Payload()
if err != nil {
t.Fatalf("error getting payload %#v", err)
}
fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures")
if err != nil {
t.Fatalf("unexpected error parsing jws: %v", err)
}
payload, err := fetchedJWS.Payload()
if err != nil {
t.Fatalf("unexpected error extracting payload: %v", err)
}
// Now that we have a payload, take a moment to check that the manifest is
// returned by the payload digest.
dgst := digest.FromBytes(payload)
exists, err = ms.Exists(ctx, dgst)
if err != nil {
t.Fatalf("error checking manifest existence by digest: %v", err)
}
if !exists {
t.Fatalf("manifest %s should exist", dgst)
}
fetchedByDigest, err := ms.Get(ctx, dgst)
if err != nil {
t.Fatalf("unexpected error fetching manifest by digest: %v", err)
}
if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) {
t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest)
}
sigs, err := fetchedJWS.Signatures()
if err != nil {
t.Fatalf("unable to extract signatures: %v", err)
}
if len(sigs) != 1 {
t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1)
}
// Now, push the same manifest with a different key
pk2, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
t.Fatalf("unexpected error generating private key: %v", err)
}
sm2, err := schema1.Sign(&m, pk2)
if err != nil {
t.Fatalf("unexpected error signing manifest: %v", err)
}
_, pl, err = sm2.Payload()
if err != nil {
t.Fatalf("error getting payload %#v", err)
}
jws2, err := libtrust.ParsePrettySignature(pl, "signatures")
if err != nil {
t.Fatalf("error parsing signature: %v", err)
}
sigs2, err := jws2.Signatures()
if err != nil {
t.Fatalf("unable to extract signatures: %v", err)
}
if len(sigs2) != 1 {
t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1)
}
if manifestDigest, err = ms.Put(ctx, sm2); err != nil {
t.Fatalf("unexpected error putting manifest: %v", err)
}
fromStore, err = ms.Get(ctx, manifestDigest)
if err != nil {
t.Fatalf("unexpected error fetching manifest: %v", err)
}
fetched, ok := fromStore.(*schema1.SignedManifest)
if !ok {
t.Fatalf("unexpected type from signed manifeststore : %T", fetched)
}
if _, err := schema1.Verify(fetched); err != nil {
t.Fatalf("unexpected error verifying manifest: %v", err)
}
// Assemble our payload and two signatures to get what we expect!
expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0])
if err != nil {
t.Fatalf("unexpected error merging jws: %v", err)
}
expectedSigs, err := expectedJWS.Signatures()
if err != nil {
t.Fatalf("unexpected error getting expected signatures: %v", err)
}
_, pl, err = fetched.Payload()
if err != nil {
t.Fatalf("error getting payload %#v", err)
}
receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures")
if err != nil {
t.Fatalf("unexpected error parsing jws: %v", err)
}
receivedPayload, err := receivedJWS.Payload()
if err != nil {
t.Fatalf("unexpected error extracting received payload: %v", err)
}
if !bytes.Equal(receivedPayload, payload) {
t.Fatalf("payloads are not equal")
}
receivedSigs, err := receivedJWS.Signatures()
if err != nil {
t.Fatalf("error getting signatures: %v", err)
}
for i, sig := range receivedSigs {
if !bytes.Equal(sig, expectedSigs[i]) {
t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i]))
}
}
// Test deleting manifests
err = ms.Delete(ctx, dgst)
if err != nil {
t.Fatalf("unexpected an error deleting manifest by digest: %v", err)
}
exists, err = ms.Exists(ctx, dgst)
if err != nil {
t.Fatalf("Error querying manifest existence")
}
if exists {
t.Errorf("Deleted manifest should not exist")
}
deletedManifest, err := ms.Get(ctx, dgst)
if err == nil {
t.Errorf("Unexpected success getting deleted manifest")
}
switch err.(type) {
case distribution.ErrManifestUnknownRevision:
break
default:
t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type())
}
if deletedManifest != nil {
t.Errorf("Deleted manifest get returned non-nil")
}
// Re-upload should restore manifest to a good state
_, err = ms.Put(ctx, sm)
if err != nil {
t.Errorf("Error re-uploading deleted manifest")
}
exists, err = ms.Exists(ctx, dgst)
if err != nil {
t.Fatalf("Error querying manifest existence")
}
if !exists {
t.Errorf("Restored manifest should exist")
}
deletedManifest, err = ms.Get(ctx, dgst)
if err != nil {
t.Errorf("Unexpected error getting manifest")
}
if deletedManifest == nil {
t.Errorf("Deleted manifest get returned non-nil")
}
r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
if err != nil {
t.Fatalf("error creating registry: %v", err)
}
repo, err := r.Repository(ctx, env.name)
if err != nil {
t.Fatalf("unexpected error getting repo: %v", err)
}
ms, err = repo.Manifests(ctx)
if err != nil {
t.Fatal(err)
}
err = ms.Delete(ctx, dgst)
if err == nil {
t.Errorf("Unexpected success deleting while disabled")
}
}
// TestLinkPathFuncs ensures that the link path functions' behavior is locked
// down and implemented as expected.
func TestLinkPathFuncs(t *testing.T) {
for _, testcase := range []struct {
repo string
digest digest.Digest
linkPathFn linkPathFunc
expected string
}{
{
repo: "foo/bar",
digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
linkPathFn: blobLinkPath,
expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link",
},
{
repo: "foo/bar",
digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
linkPathFn: manifestRevisionLinkPath,
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link",
},
} {
p, err := testcase.linkPathFn(testcase.repo, testcase.digest)
if err != nil {
t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err)
}
if p != testcase.expected {
t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected)
}
}
}

View File

@@ -1,493 +0,0 @@
package storage
import (
"fmt"
"path"
"strings"
"github.com/docker/distribution/digest"
)
const (
storagePathVersion = "v2" // fixed storage layout version
storagePathRoot = "/docker/registry/" // all driver paths have a prefix
// TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought
// the storage path root would be configurable for all drivers through this
// package. In reality, we've found it simpler to do this on a per driver
// basis.
)
// pathFor maps paths based on "object names" and their ids. The "object
// names" mapped by are internal to the storage system.
//
// The path layout in the storage backend is roughly as follows:
//
// <root>/v2
// -> repositories/
// -><name>/
// -> _manifests/
// revisions
// -> <manifest digest path>
// -> link
// -> signatures
// <algorithm>/<digest>/link
// tags/<tag>
// -> current/link
// -> index
// -> <algorithm>/<hex digest>/link
// -> _layers/
// <layer links to blob store>
// -> _uploads/<id>
// data
// startedat
// hashstates/<algorithm>/<offset>
// -> blob/<algorithm>
// <split directory content addressable storage>
//
// The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data
// throughout the backend, keyed by algorithm and digests of the underlying
// content. Access to the blob store is controlled through links from the
// repository to the blob store.
//
// A repository is made up of layers, manifests and tags. The layers component
// is just a directory of layers which are "linked" into a repository. A layer
// can only be accessed through a qualified repository name if it is linked in
// the repository. Uploads of layers are managed in the uploads directory,
// which is keyed by upload id. When all data for an upload is received, the
// data is moved into the blob store and the upload directory is deleted.
// Abandoned uploads can be garbage collected by reading the startedat file
// and removing uploads that have been active for longer than a certain time.
//
// The third component of the repository directory is the manifests store,
// which is made up of a revision store and tag store. Manifests are stored in
// the blob store and linked into the revision store. Signatures are separated
// from the manifest payload data and linked into the blob store, as well.
// While the registry can save all revisions of a manifest, no relationship is
// implied as to the ordering of changes to a manifest. The tag store provides
// support for name, tag lookups of manifests, using "current/link" under a
// named tag directory. An index is maintained to support deletions of all
// revisions of a given manifest tag.
//
// We cover the path formats implemented by this path mapper below.
//
// Manifests:
//
// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
// manifestSignaturesPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/
// manifestSignatureLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/<algorithm>/<hex digest>/link
//
// Tags:
//
// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/
// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/
// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
//
// Blobs:
//
// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
//
// Uploads:
//
// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data
// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
//
// Blob Store:
//
// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
func pathFor(spec pathSpec) (string, error) {
// Switch on the path object type and return the appropriate path. At
// first glance, one may wonder why we don't use an interface to
// accomplish this. By keeping the formatting separate from the pathSpec, we
// keep the path generation componentized. These specs could be
// passed to a completely different mapper implementation and generate a
// different set of paths.
//
// For example, imagine migrating from one backend to the other: one could
// build a filesystem walker that converts a string path in one version,
// to an intermediate path object that can be consumed and mapped by the
// other version.
rootPrefix := []string{storagePathRoot, storagePathVersion}
repoPrefix := append(rootPrefix, "repositories")
switch v := spec.(type) {
case manifestRevisionPathSpec:
components, err := digestPathComponents(v.revision, false)
if err != nil {
return "", err
}
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
case manifestRevisionLinkPathSpec:
root, err := pathFor(manifestRevisionPathSpec{
name: v.name,
revision: v.revision,
})
if err != nil {
return "", err
}
return path.Join(root, "link"), nil
case manifestSignaturesPathSpec:
root, err := pathFor(manifestRevisionPathSpec{
name: v.name,
revision: v.revision,
})
if err != nil {
return "", err
}
return path.Join(root, "signatures"), nil
case manifestSignatureLinkPathSpec:
root, err := pathFor(manifestSignaturesPathSpec{
name: v.name,
revision: v.revision,
})
if err != nil {
return "", err
}
signatureComponents, err := digestPathComponents(v.signature, false)
if err != nil {
return "", err
}
return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil
case manifestTagsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil
case manifestTagPathSpec:
root, err := pathFor(manifestTagsPathSpec{
name: v.name,
})
if err != nil {
return "", err
}
return path.Join(root, v.tag), nil
case manifestTagCurrentPathSpec:
root, err := pathFor(manifestTagPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
return path.Join(root, "current", "link"), nil
case manifestTagIndexPathSpec:
root, err := pathFor(manifestTagPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
return path.Join(root, "index"), nil
case manifestTagIndexEntryLinkPathSpec:
root, err := pathFor(manifestTagIndexEntryPathSpec{
name: v.name,
tag: v.tag,
revision: v.revision,
})
if err != nil {
return "", err
}
return path.Join(root, "link"), nil
case manifestTagIndexEntryPathSpec:
root, err := pathFor(manifestTagIndexPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
components, err := digestPathComponents(v.revision, false)
if err != nil {
return "", err
}
return path.Join(root, path.Join(components...)), nil
case layerLinkPathSpec:
components, err := digestPathComponents(v.digest, false)
if err != nil {
return "", err
}
// TODO(stevvooe): Right now, all blobs are linked under "_layers". If
// we have future migrations, we may want to rename this to "_blobs".
// A migration strategy would simply leave existing items in place and
// write the new paths, commit a file then delete the old files.
blobLinkPathComponents := append(repoPrefix, v.name, "_layers")
return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil
case blobDataPathSpec:
components, err := digestPathComponents(v.digest, true)
if err != nil {
return "", err
}
components = append(components, "data")
blobPathPrefix := append(rootPrefix, "blobs")
return path.Join(append(blobPathPrefix, components...)...), nil
case uploadDataPathSpec:
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil
case uploadStartedAtPathSpec:
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil
case uploadHashStatePathSpec:
offset := fmt.Sprintf("%d", v.offset)
if v.list {
offset = "" // Limit to the prefix for listing offsets.
}
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
case repositoriesRootPathSpec:
return path.Join(repoPrefix...), nil
default:
// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
return "", fmt.Errorf("unknown path spec: %#v", v)
}
}
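// Illustrative sketch, not part of the upstream file: mapping two specs to
// their backend paths. The digest is a made-up but well-formed sha256 value;
// the expected outputs follow the layout documented above.
func examplePathFor() {
dgst := digest.Digest("sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")
layer, _ := pathFor(layerLinkPathSpec{name: "foo/bar", digest: dgst})
// layer == "/docker/registry/v2/repositories/foo/bar/_layers/sha256/<hex>/link"
data, _ := pathFor(blobDataPathSpec{digest: dgst})
// data == "/docker/registry/v2/blobs/sha256/ab/<hex>/data"
fmt.Println(layer, data)
}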
// pathSpec is a type to mark structs as path specs. There is no
// implementation because we'd like to keep the specs and the mappers
// decoupled.
type pathSpec interface {
pathSpec()
}
// manifestRevisionPathSpec describes the components of the directory path for
// a manifest revision.
type manifestRevisionPathSpec struct {
name string
revision digest.Digest
}
func (manifestRevisionPathSpec) pathSpec() {}
// manifestRevisionLinkPathSpec describes the path components required to look
// up the data link for a revision of a manifest. If this file is not present,
// the manifest blob is not available in the given repo. The contents of this
// file should just be the digest.
type manifestRevisionLinkPathSpec struct {
name string
revision digest.Digest
}
func (manifestRevisionLinkPathSpec) pathSpec() {}
// manifestSignaturesPathSpec describes the path components for the directory
// containing all the signatures for the target blob. Entries are named with
// the underlying key id.
type manifestSignaturesPathSpec struct {
name string
revision digest.Digest
}
func (manifestSignaturesPathSpec) pathSpec() {}
// manifestSignatureLinkPathSpec describes the path components used to look up
// a signature file by the hash of its blob.
type manifestSignatureLinkPathSpec struct {
name string
revision digest.Digest
signature digest.Digest
}
func (manifestSignatureLinkPathSpec) pathSpec() {}
// manifestTagsPathSpec describes the path elements required to point to the
// manifest tags directory.
type manifestTagsPathSpec struct {
name string
}
func (manifestTagsPathSpec) pathSpec() {}
// manifestTagPathSpec describes the path elements required to point to the
// manifest tag links files under a repository. These contain a blob id that
// can be used to look up the data and signatures.
type manifestTagPathSpec struct {
name string
tag string
}
func (manifestTagPathSpec) pathSpec() {}
// manifestTagCurrentPathSpec describes the link to the current revision for a
// given tag.
type manifestTagCurrentPathSpec struct {
name string
tag string
}
func (manifestTagCurrentPathSpec) pathSpec() {}
// manifestTagIndexPathSpec describes the index of revisions with the
// given tag.
type manifestTagIndexPathSpec struct {
name string
tag string
}
func (manifestTagIndexPathSpec) pathSpec() {}
// manifestTagIndexEntryPathSpec contains the entries of the index by revision.
type manifestTagIndexEntryPathSpec struct {
name string
tag string
revision digest.Digest
}
func (manifestTagIndexEntryPathSpec) pathSpec() {}
// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a
// manifest with the given tag within the index.
type manifestTagIndexEntryLinkPathSpec struct {
name string
tag string
revision digest.Digest
}
func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}
// blobLinkPathSpec specifies a path for a blob link, which is a file with a
// blob id. The blob link will contain a content addressable blob id reference
// into the blob store. The format of the contents is as follows:
//
// <algorithm>:<hex digest of layer data>
//
// The following example of the file contents is more illustrative:
//
// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
//
// This indicates that there is a blob with the id/digest, calculated via
// sha256 that can be fetched from the blob store.
type layerLinkPathSpec struct {
name string
digest digest.Digest
}
func (layerLinkPathSpec) pathSpec() {}
// blobAlgorithmReplacer does some very simple path sanitization for user
// input. Paths should be "safe" before getting this far due to strict digest
// requirements but we can add further path conversion here, if needed.
var blobAlgorithmReplacer = strings.NewReplacer(
"+", "/",
".", "/",
";", "/",
)
// // blobPathSpec contains the path for the registry global blob store.
// type blobPathSpec struct {
// digest digest.Digest
// }
// func (blobPathSpec) pathSpec() {}
// blobDataPathSpec contains the path for the registry global blob store. For
// now, this contains layer data, exclusively.
type blobDataPathSpec struct {
digest digest.Digest
}
func (blobDataPathSpec) pathSpec() {}
// uploadDataPathSpec defines the path parameters of the data file for
// uploads.
type uploadDataPathSpec struct {
name string
id string
}
func (uploadDataPathSpec) pathSpec() {}
// uploadStartedAtPathSpec defines the path parameters for the file that stores
// the start time of an upload. If it is missing, the upload is considered
// unknown. Admittedly, the presence of this file is an ugly hack to make sure
// we have a way to clean up old or stalled uploads that doesn't rely on driver
// FileInfo behavior. If we come up with a more clever way to do this, we
// should remove this file immediately and rely on the startedAt field from
// the client to enforce timeout policies.
type uploadStartedAtPathSpec struct {
name string
id string
}
func (uploadStartedAtPathSpec) pathSpec() {}
// uploadHashStatePathSpec defines the path parameters for the file that stores
// the hash function state of an upload at a specific byte offset. If `list` is
// set, then the path mapper will generate a list prefix for all hash state
// offsets for the upload identified by the name, id, and alg.
type uploadHashStatePathSpec struct {
name string
id string
alg digest.Algorithm
offset int64
list bool
}
func (uploadHashStatePathSpec) pathSpec() {}
// repositoriesRootPathSpec returns the root of repositories
type repositoriesRootPathSpec struct {
}
func (repositoriesRootPathSpec) pathSpec() {}
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
// <algorithm>/<hex digest>
//
// If multilevel is true, the first two bytes of the digest will separate
// groups of digest folder. It will be as follows:
//
// <algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
if err := dgst.Validate(); err != nil {
return nil, err
}
algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
hex := dgst.Hex()
prefix := []string{algorithm}
var suffix []string
if multilevel {
suffix = append(suffix, hex[:2])
}
suffix = append(suffix, hex)
return append(prefix, suffix...), nil
}
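// Illustrative sketch, not upstream: the multilevel flag adds the two-byte
// fan-out directory used by the global blob store.
func exampleDigestPathComponents() {
dgst := digest.Digest("sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")
flat, _ := digestPathComponents(dgst, false)  // ["sha256", "<hex>"]
nested, _ := digestPathComponents(dgst, true) // ["sha256", "ab", "<hex>"]
fmt.Println(flat, nested)
}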

View File

@@ -1,122 +0,0 @@
package storage
import (
"testing"
)
func TestPathMapper(t *testing.T) {
for _, testcase := range []struct {
spec pathSpec
expected string
err error
}{
{
spec: manifestRevisionPathSpec{
name: "foo/bar",
revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
{
spec: manifestRevisionLinkPathSpec{
name: "foo/bar",
revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
},
{
spec: manifestSignatureLinkPathSpec{
name: "foo/bar",
revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
signature: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
},
{
spec: manifestSignaturesPathSpec{
name: "foo/bar",
revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures",
},
{
spec: manifestTagsPathSpec{
name: "foo/bar",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags",
},
{
spec: manifestTagPathSpec{
name: "foo/bar",
tag: "thetag",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag",
},
{
spec: manifestTagCurrentPathSpec{
name: "foo/bar",
tag: "thetag",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link",
},
{
spec: manifestTagIndexPathSpec{
name: "foo/bar",
tag: "thetag",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index",
},
{
spec: manifestTagIndexEntryPathSpec{
name: "foo/bar",
tag: "thetag",
revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
{
spec: manifestTagIndexEntryLinkPathSpec{
name: "foo/bar",
tag: "thetag",
revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
},
expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
},
{
spec: uploadDataPathSpec{
name: "foo/bar",
id: "asdf-asdf-asdf-adsf",
},
expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data",
},
{
spec: uploadStartedAtPathSpec{
name: "foo/bar",
id: "asdf-asdf-asdf-adsf",
},
expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat",
},
} {
p, err := pathFor(testcase.spec)
if err != nil {
t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err)
}
if p != testcase.expected {
t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected)
}
}
// Add a few test cases to ensure we cover some errors
// Specify a path that requires a revision and get a digest validation error.
badpath, err := pathFor(manifestSignaturesPathSpec{
name: "foo/bar",
})
if err == nil {
t.Fatalf("expected an error when mapping an invalid revision: %s", badpath)
}
}

View File

@@ -1,139 +0,0 @@
package storage
import (
"path"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storageDriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/uuid"
)
// uploadData stores the location of temporary files created during a layer upload
// along with the date the upload was started
type uploadData struct {
containingDir string
startedAt time.Time
}
func newUploadData() uploadData {
return uploadData{
containingDir: "",
// default to far in future to protect against missing startedat
startedAt: time.Now().Add(time.Duration(10000 * time.Hour)),
}
}
// PurgeUploads deletes files from the upload directory
// created before olderThan. The list of files deleted and errors
// encountered are returned
func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) {
log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete)
uploadData, errors := getOutstandingUploads(ctx, driver)
var deleted []string
for _, uploadData := range uploadData {
if uploadData.startedAt.Before(olderThan) {
var err error
log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.",
uploadData.containingDir, uploadData.startedAt, olderThan)
if actuallyDelete {
err = driver.Delete(ctx, uploadData.containingDir)
}
if err == nil {
deleted = append(deleted, uploadData.containingDir)
} else {
errors = append(errors, err)
}
}
}
log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors))
return deleted, errors
}
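// A hedged usage sketch (not part of the original file): run PurgeUploads as
// a dry run first, then delete for real. The one-week cutoff and the helper
// name are illustrative choices, not project conventions.
func examplePurgeUploads(ctx context.Context, driver storageDriver.StorageDriver) {
	cutoff := time.Now().Add(-7 * 24 * time.Hour)
	// With actuallyDelete=false nothing is removed; the returned slice lists
	// the upload directories that would have been deleted.
	wouldDelete, _ := PurgeUploads(ctx, driver, cutoff, false)
	log.Infof("dry run: %d upload directories eligible for purge", len(wouldDelete))
	deleted, errs := PurgeUploads(ctx, driver, cutoff, true)
	log.Infof("purged %d upload directories, %d errors", len(deleted), len(errs))
}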
// getOutstandingUploads walks the upload directory, collecting files
// which could be eligible for deletion. The only reliable way to
// classify the age of a file is with the date stored in the startedAt
// file, so gather files by UUID with a date from startedAt.
func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) {
var errors []error
uploads := make(map[string]uploadData)
inUploadDir := false
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return uploads, append(errors, err)
}
err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error {
filePath := fileInfo.Path()
_, file := path.Split(filePath)
if file[0] == '_' {
// Reserved directory
inUploadDir = (file == "_uploads")
if fileInfo.IsDir() && !inUploadDir {
return ErrSkipDir
}
}
uuid, isContainingDir := uUIDFromPath(filePath)
if uuid == "" {
// Cannot reliably delete
return nil
}
ud, ok := uploads[uuid]
if !ok {
ud = newUploadData()
}
if isContainingDir {
ud.containingDir = filePath
}
if file == "startedat" {
if t, err := readStartedAtFile(driver, filePath); err == nil {
ud.startedAt = t
} else {
errors = pushError(errors, filePath, err)
}
}
uploads[uuid] = ud
return nil
})
if err != nil {
errors = pushError(errors, root, err)
}
return uploads, errors
}
// uUIDFromPath extracts the upload UUID from a given path
// If the UUID is the last path component, this is the containing
// directory for all upload files
func uUIDFromPath(path string) (string, bool) {
components := strings.Split(path, "/")
for i := len(components) - 1; i >= 0; i-- {
if u, err := uuid.Parse(components[i]); err == nil {
return u.String(), i == len(components)-1
}
}
return "", false
}
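// An illustrative check of uUIDFromPath semantics; the UUID below is made up.
func exampleUUIDFromPath() {
	// The UUID is not the last path component here, so this is a file inside
	// the upload directory: the second return value is false. For the
	// directory .../_uploads/<uuid> itself it would be true.
	u, isContainingDir := uUIDFromPath(
		"/docker/registry/v2/repositories/foo/bar/_uploads/3d671212-3f99-44ae-9a8a-2c4ffa12e546/startedat")
	_, _ = u, isContainingDir
}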
// readStartedAtFile reads the date from an upload's startedAtFile
func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) {
// todo:(richardscothern) - pass in a context
startedAtBytes, err := driver.GetContent(context.Background(), path)
if err != nil {
return time.Now(), err
}
startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
if err != nil {
return time.Now(), err
}
return startedAt, nil
}
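// A hedged sketch of the startedat contract readStartedAtFile relies on: the
// uploader stores an RFC3339 timestamp so the purger can parse it back. The
// function name is illustrative; compare addUploads in the purge tests.
func exampleWriteStartedAt(ctx context.Context, driver storageDriver.StorageDriver, repo, id string) error {
	p, err := pathFor(uploadStartedAtPathSpec{name: repo, id: id})
	if err != nil {
		return err
	}
	// RFC3339 round-trips through time.Parse and stays human-readable.
	return driver.PutContent(ctx, p, []byte(time.Now().UTC().Format(time.RFC3339)))
}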

View File

@@ -1,166 +0,0 @@
package storage
import (
"path"
"strings"
"testing"
"time"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/uuid"
)
func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) {
d := inmemory.New()
ctx := context.Background()
for i := 0; i < numUploads; i++ {
addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt)
}
return d, ctx
}
func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) {
dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID})
if err != nil {
t.Fatalf("Unable to resolve path")
}
if err := d.PutContent(ctx, dataPath, []byte("")); err != nil {
t.Fatalf("Unable to write data file")
}
startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID})
if err != nil {
t.Fatalf("Unable to resolve path")
}
if err := d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
t.Fatalf("Unable to write startedAt file")
}
}
func TestPurgeGather(t *testing.T) {
uploadCount := 5
fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now())
uploadData, errs := getOutstandingUploads(ctx, fs)
if len(errs) != 0 {
t.Errorf("Unexepected errors: %q", errs)
}
if len(uploadData) != uploadCount {
t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData))
}
}
func TestPurgeNone(t *testing.T) {
fs, ctx := testUploadFS(t, 10, "test-repo", time.Now())
oneHourAgo := time.Now().Add(-1 * time.Hour)
deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true)
if len(errs) != 0 {
t.Error("Unexpected errors", errs)
}
if len(deleted) != 0 {
t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo)
}
}
func TestPurgeAll(t *testing.T) {
uploadCount := 10
oneHourAgo := time.Now().Add(-1 * time.Hour)
fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo)
// Ensure > 1 repos are purged
addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo)
uploadCount++
deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
if len(errs) != 0 {
t.Error("Unexpected errors:", errs)
}
fileCount := uploadCount
if len(deleted) != fileCount {
t.Errorf("Unexpectedly deleted file count %d != %d",
len(deleted), fileCount)
}
}
func TestPurgeSome(t *testing.T) {
oldUploadCount := 5
oneHourAgo := time.Now().Add(-1 * time.Hour)
fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo)
newUploadCount := 4
for i := 0; i < newUploadCount; i++ {
addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour))
}
deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
if len(errs) != 0 {
t.Error("Unexpected errors:", errs)
}
if len(deleted) != oldUploadCount {
t.Errorf("Unexpectedly deleted file count %d != %d",
len(deleted), oldUploadCount)
}
}
func TestPurgeOnlyUploads(t *testing.T) {
oldUploadCount := 5
oneHourAgo := time.Now().Add(-1 * time.Hour)
fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo)
// Create a directory tree outside _uploads and ensure
// these files aren't deleted.
dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()})
if err != nil {
t.Fatalf(err.Error())
}
nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1)
if strings.Contains(nonUploadPath, "_upload") {
t.Fatalf("Non-upload path not created correctly")
}
nonUploadFile := path.Join(nonUploadPath, "file")
if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil {
t.Fatalf("Unable to write data file")
}
deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
if len(errs) != 0 {
t.Error("Unexpected errors", errs)
}
for _, file := range deleted {
if strings.Index(file, "_upload") == -1 {
t.Errorf("Non-upload file deleted")
}
}
}
func TestPurgeMissingStartedAt(t *testing.T) {
oneHourAgo := time.Now().Add(-1 * time.Hour)
fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo)
err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error {
filePath := fileInfo.Path()
_, file := path.Split(filePath)
if file == "startedat" {
if err := fs.Delete(ctx, filePath); err != nil {
t.Fatalf("Unable to delete startedat file: %s", filePath)
}
}
return nil
})
if err != nil {
t.Fatalf("Unexpected error during Walk: %s ", err.Error())
}
deleted, errs := PurgeUploads(ctx, fs, time.Now(), true)
if len(errs) > 0 {
t.Errorf("Unexpected errors")
}
if len(deleted) > 0 {
t.Errorf("Files unexpectedly deleted: %s", deleted)
}
}

View File

@@ -1,242 +0,0 @@
package storage
import (
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
// registry is the top-level implementation of Registry for use in the storage
// package. All instances should descend from this object.
type registry struct {
blobStore *blobStore
blobServer *blobServer
statter *blobStatter // global statter service.
blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider
deleteEnabled bool
resumableDigestEnabled bool
}
// RegistryOption is the type used for functional options for NewRegistry.
type RegistryOption func(*registry) error
// EnableRedirect is a functional option for NewRegistry. It causes the backend
// blob server to attempt using (StorageDriver).URLFor to serve all blobs.
func EnableRedirect(registry *registry) error {
registry.blobServer.redirect = true
return nil
}
// EnableDelete is a functional option for NewRegistry. It enables deletion on
// the registry.
func EnableDelete(registry *registry) error {
registry.deleteEnabled = true
return nil
}
// DisableDigestResumption is a functional option for NewRegistry. It should be
// used if the registry is acting as a caching proxy.
func DisableDigestResumption(registry *registry) error {
registry.resumableDigestEnabled = false
return nil
}
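// Because RegistryOption is just func(*registry) error, new options compose
// without changes to NewRegistry. A hedged sketch of a combined option; the
// name is illustrative and, since registry is unexported, this only works
// from within the storage package.
func enableDeleteAndRedirect(registry *registry) error {
	for _, opt := range []RegistryOption{EnableDelete, EnableRedirect} {
		if err := opt(registry); err != nil {
			return err
		}
	}
	return nil
}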
// BlobDescriptorCacheProvider returns a functional option for
// NewRegistry. It creates a cached blob statter for use by the
// registry.
func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption {
// TODO(aaronl): The duplication of statter across several objects is
// ugly, and prevents us from using interface types in the registry
// struct. Ideally, blobStore and blobServer should be lazily
// initialized, and use the current value of
// blobDescriptorCacheProvider.
return func(registry *registry) error {
if blobDescriptorCacheProvider != nil {
statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter)
registry.blobStore.statter = statter
registry.blobServer.statter = statter
registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider
}
return nil
}
}
// NewRegistry creates a new registry instance from the provided driver. The
// resulting registry may be shared by multiple goroutines but is cheap to
// allocate. If the Redirect option is specified, the backend blob server will
// attempt to use (StorageDriver).URLFor to serve all blobs.
func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) {
// create global statter
statter := &blobStatter{
driver: driver,
}
bs := &blobStore{
driver: driver,
statter: statter,
}
registry := &registry{
blobStore: bs,
blobServer: &blobServer{
driver: driver,
statter: statter,
pathFn: bs.path,
},
statter: statter,
resumableDigestEnabled: true,
}
for _, option := range options {
if err := option(registry); err != nil {
return nil, err
}
}
return registry, nil
}
// Scope returns the namespace scope for a registry. The registry
// will only serve repositories contained within this scope.
func (reg *registry) Scope() distribution.Scope {
return distribution.GlobalScope
}
// Repository returns an instance of the repository tied to the registry.
// Instances should not be shared between goroutines but are cheap to
// allocate. In general, they should be request scoped.
func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) {
var descriptorCache distribution.BlobDescriptorService
if reg.blobDescriptorCacheProvider != nil {
var err error
descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name())
if err != nil {
return nil, err
}
}
return &repository{
ctx: ctx,
registry: reg,
name: canonicalName,
descriptorCache: descriptorCache,
}, nil
}
// repository provides name-scoped access to various services.
type repository struct {
*registry
ctx context.Context
name reference.Named
descriptorCache distribution.BlobDescriptorService
}
// Name returns the name of the repository.
func (repo *repository) Name() reference.Named {
return repo.name
}
func (repo *repository) Tags(ctx context.Context) distribution.TagService {
tags := &tagStore{
repository: repo,
blobStore: repo.registry.blobStore,
}
return tags
}
// Manifests returns an instance of ManifestService. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used similar
// to a request local.
func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
manifestLinkPathFns := []linkPathFunc{
// NOTE(stevvooe): Need to search through multiple locations since
// 2.1.0 unintentionally linked into _layers.
manifestRevisionLinkPath,
blobLinkPath,
}
blobStore := &linkedBlobStore{
ctx: ctx,
blobStore: repo.blobStore,
repository: repo,
deleteEnabled: repo.registry.deleteEnabled,
blobAccessController: &linkedBlobStatter{
blobStore: repo.blobStore,
repository: repo,
linkPathFns: manifestLinkPathFns,
},
// TODO(stevvooe): linkPath limits this blob store to only
// manifests. This instance cannot be used for blob checks.
linkPathFns: manifestLinkPathFns,
}
ms := &manifestStore{
ctx: ctx,
repository: repo,
blobStore: blobStore,
schema1Handler: &signedManifestHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
signatures: &signatureStore{
ctx: ctx,
repository: repo,
blobStore: repo.blobStore,
},
},
schema2Handler: &schema2ManifestHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
},
manifestListHandler: &manifestListHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
},
}
// Apply options
for _, option := range options {
err := option.Apply(ms)
if err != nil {
return nil, err
}
}
return ms, nil
}
// Blobs returns an instance of the BlobStore. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used similar
// to a request local.
func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
var statter distribution.BlobDescriptorService = &linkedBlobStatter{
blobStore: repo.blobStore,
repository: repo,
linkPathFns: []linkPathFunc{blobLinkPath},
}
if repo.descriptorCache != nil {
statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
}
return &linkedBlobStore{
registry: repo.registry,
blobStore: repo.blobStore,
blobServer: repo.blobServer,
blobAccessController: statter,
repository: repo,
ctx: ctx,
// TODO(stevvooe): linkPath limits this blob store to only layers.
// This instance cannot be used for manifest checks.
linkPathFns: []linkPathFunc{blobLinkPath},
deleteEnabled: repo.registry.deleteEnabled,
resumableDigestEnabled: repo.resumableDigestEnabled,
}
}

View File

@@ -1,99 +0,0 @@
package storage
import (
"fmt"
"encoding/json"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema2"
)
// schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct {
repository *repository
blobStore *linkedBlobStore
ctx context.Context
}
var _ ManifestHandler = &schema2ManifestHandler{}
func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal")
var m schema2.DeserializedManifest
if err := json.Unmarshal(content, &m); err != nil {
return nil, err
}
return &m, nil
}
func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put")
m, ok := manifest.(*schema2.DeserializedManifest)
if !ok {
return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest)
}
if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
return "", err
}
mt, payload, err := m.Payload()
if err != nil {
return "", err
}
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
// Link the revision into the repository.
if err := ms.blobStore.linkBlob(ctx, revision); err != nil {
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to store
// valid content, leaving trust policies of that content up to consumers.
func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if !skipDependencyVerification {
target := mnfst.Target()
_, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest)
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest})
}
for _, fsLayer := range mnfst.References() {
_, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest})
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}

View File

@@ -1,131 +0,0 @@
package storage
import (
"path"
"sync"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
)
type signatureStore struct {
repository *repository
blobStore *blobStore
ctx context.Context
}
func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
signaturesPath, err := pathFor(manifestSignaturesPathSpec{
name: s.repository.Name().Name(),
revision: dgst,
})
if err != nil {
return nil, err
}
// Need to append signature digest algorithm to path to get all items.
// Perhaps, this should be in the pathMapper but it feels awkward. This
// can be eliminated by implementing listAll on drivers.
signaturesPath = path.Join(signaturesPath, "sha256")
signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath)
if err != nil {
return nil, err
}
var wg sync.WaitGroup
type result struct {
index int
signature []byte
err error
}
ch := make(chan result)
bs := s.linkedBlobStore(s.ctx, dgst)
for i, sigPath := range signaturePaths {
sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath))
if err != nil {
context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath)
continue
}
wg.Add(1)
go func(idx int, sigdgst digest.Digest) {
defer wg.Done()
context.GetLogger(s.ctx).
Debugf("fetching signature %q", sigdgst)
r := result{index: idx}
if p, err := bs.Get(s.ctx, sigdgst); err != nil {
context.GetLogger(s.ctx).
Errorf("error fetching signature %q: %v", sigdgst, err)
r.err = err
} else {
r.signature = p
}
ch <- r
}(i, sigdgst)
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
// aggregate the results
signatures := make([][]byte, len(signaturePaths))
loop:
for {
select {
case result := <-ch:
signatures[result.index] = result.signature
if result.err != nil && err == nil {
// only set the first one.
err = result.err
}
case <-done:
break loop
}
}
return signatures, err
}
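// The loop above is an index-preserving fan-out/fan-in: every goroutine
// reports its slot so results line up with signaturePaths regardless of
// completion order, and only the first error is kept. A minimal standalone
// sketch of the same pattern (generic names, not part of the original file):
func fanOutFanIn(items []string, fetch func(string) ([]byte, error)) ([][]byte, error) {
	type result struct {
		index int
		data  []byte
		err   error
	}
	ch := make(chan result, len(items)) // buffered, so workers never block
	for i, item := range items {
		go func(i int, item string) {
			data, err := fetch(item)
			ch <- result{index: i, data: data, err: err}
		}(i, item)
	}
	out := make([][]byte, len(items))
	var firstErr error
	for range items {
		r := <-ch
		out[r.index] = r.data
		if r.err != nil && firstErr == nil {
			firstErr = r.err // keep only the first error, as Get does
		}
	}
	return out, firstErr
}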
func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {
bs := s.linkedBlobStore(s.ctx, dgst)
for _, signature := range signatures {
if _, err := bs.Put(s.ctx, "application/json", signature); err != nil {
return err
}
}
return nil
}
// linkedBlobStore returns the namedBlobStore of the signatures for the
// manifest with the given digest. Effectively, each signature link path
// layout is a unique linked blob store.
func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore {
linkpath := func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestSignatureLinkPathSpec{
name: name,
revision: revision,
signature: dgst,
})
}
return &linkedBlobStore{
ctx: ctx,
repository: s.repository,
blobStore: s.blobStore,
blobAccessController: &linkedBlobStatter{
blobStore: s.blobStore,
repository: s.repository,
linkPathFns: []linkPathFunc{linkpath},
},
linkPathFns: []linkPathFunc{linkpath},
}
}

View File

@@ -1,150 +0,0 @@
package storage
import (
"encoding/json"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
)
// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It
// can unmarshal and put schema1 manifests that have been signed by libtrust.
type signedManifestHandler struct {
repository *repository
blobStore *linkedBlobStore
ctx context.Context
signatures *signatureStore
}
var _ ManifestHandler = &signedManifestHandler{}
func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal")
// Fetch the signatures for the manifest
signatures, err := ms.signatures.Get(dgst)
if err != nil {
return nil, err
}
jsig, err := libtrust.NewJSONSignature(content, signatures...)
if err != nil {
return nil, err
}
// Extract the pretty JWS
raw, err := jsig.PrettySignature("signatures")
if err != nil {
return nil, err
}
var sm schema1.SignedManifest
if err := json.Unmarshal(raw, &sm); err != nil {
return nil, err
}
return &sm, nil
}
func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put")
sm, ok := manifest.(*schema1.SignedManifest)
if !ok {
return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest)
}
if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil {
return "", err
}
mt := schema1.MediaTypeManifest
payload := sm.Canonical
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
// Link the revision into the repository.
if err := ms.blobStore.linkBlob(ctx, revision); err != nil {
return "", err
}
// Grab each json signature and store them.
signatures, err := sm.Signatures()
if err != nil {
return "", err
}
if err := ms.signatures.Put(revision.Digest, signatures...); err != nil {
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. It ensures that the signature is valid for the
// enclosed payload. As a policy, the registry only tries to store valid
// content, leaving trust policies of that content up to consumers.
func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if len(mnfst.Name) > reference.NameTotalLengthMax {
errs = append(errs,
distribution.ErrManifestNameInvalid{
Name: mnfst.Name,
Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax),
})
}
if !reference.NameRegexp.MatchString(mnfst.Name) {
errs = append(errs,
distribution.ErrManifestNameInvalid{
Name: mnfst.Name,
Reason: fmt.Errorf("invalid manifest name format"),
})
}
if len(mnfst.History) != len(mnfst.FSLayers) {
errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d",
len(mnfst.History), len(mnfst.FSLayers)))
}
if _, err := schema1.Verify(&mnfst); err != nil {
switch err {
case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent:
errs = append(errs, distribution.ErrManifestUnverified{})
default:
if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust
errs = append(errs, distribution.ErrManifestUnverified{})
} else {
errs = append(errs, err)
}
}
}
if !skipDependencyVerification {
for _, fsLayer := range mnfst.References() {
_, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest})
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}

View File

@@ -1,191 +0,0 @@
package storage
import (
"path"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
var _ distribution.TagService = &tagStore{}
// tagStore provides methods to manage manifest tags in a backend storage driver.
// This implementation uses the same on-disk layout as the (now deleted) tag
// store, providing backward compatibility with current registry deployments,
// which only make use of the Digest field of the returned
// distribution.Descriptor and do not require full roundtripping of Descriptor
// objects.
type tagStore struct {
repository *repository
blobStore *blobStore
}
// All returns all tags
func (ts *tagStore) All(ctx context.Context) ([]string, error) {
var tags []string
pathSpec, err := pathFor(manifestTagPathSpec{
name: ts.repository.Name().Name(),
})
if err != nil {
return tags, err
}
entries, err := ts.blobStore.driver.List(ctx, pathSpec)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name().Name()}
default:
return tags, err
}
}
for _, entry := range entries {
_, filename := path.Split(entry)
tags = append(tags, filename)
}
return tags, nil
}
// exists returns true if the specified manifest tag exists in the repository.
func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) {
tagPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Name().Name(),
tag: tag,
})
if err != nil {
return false, err
}
exists, err := exists(ctx, ts.blobStore.driver, tagPath)
if err != nil {
return false, err
}
return exists, nil
}
// Tag tags the digest with the given tag, updating the store so the tag
// points at the given digest. The digest must point to a manifest.
func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Name().Name(),
tag: tag,
})
if err != nil {
return err
}
lbs := ts.linkedBlobStore(ctx, tag)
// Link into the index
if err := lbs.linkBlob(ctx, desc); err != nil {
return err
}
// Overwrite the current link
return ts.blobStore.link(ctx, currentPath, desc.Digest)
}
// Get resolves the current revision for the repository name and tag.
func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Name().Name(),
tag: tag,
})
if err != nil {
return distribution.Descriptor{}, err
}
revision, err := ts.blobStore.readlink(ctx, currentPath)
if err != nil {
switch err.(type) {
case storagedriver.PathNotFoundError:
return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
}
return distribution.Descriptor{}, err
}
return distribution.Descriptor{Digest: revision}, nil
}
// Untag removes the tag association
func (ts *tagStore) Untag(ctx context.Context, tag string) error {
tagPath, err := pathFor(manifestTagPathSpec{
name: ts.repository.Name().Name(),
tag: tag,
})
switch err.(type) {
case storagedriver.PathNotFoundError:
return distribution.ErrTagUnknown{Tag: tag}
case nil:
break
default:
return err
}
return ts.blobStore.driver.Delete(ctx, tagPath)
}
// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
// to index manifest blobs by tag name. While the tag store doesn't map
// precisely to the linked blob store, using this ensures the links are
// managed via the same code path.
func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore {
return &linkedBlobStore{
blobStore: ts.blobStore,
repository: ts.repository,
ctx: ctx,
linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestTagIndexEntryLinkPathSpec{
name: name,
tag: tag,
revision: dgst,
})
}},
}
}
// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by
// digest, tag entries which point to it need to be recovered to avoid dangling tags.
func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) {
allTags, err := ts.All(ctx)
switch err.(type) {
case distribution.ErrRepositoryUnknown:
// This tag store has been initialized but not yet populated
break
case nil:
break
default:
return nil, err
}
var tags []string
for _, tag := range allTags {
tagLinkPathSpec := manifestTagCurrentPathSpec{
name: ts.repository.Name().Name(),
tag: tag,
}
tagLinkPath, err := pathFor(tagLinkPathSpec)
if err != nil {
return nil, err
}
tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath)
if err != nil {
return nil, err
}
if tagDigest == desc.Digest {
tags = append(tags, tag)
}
}
return tags, nil
}
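// A hedged end-to-end sketch of the TagService surface implemented above,
// assuming a repository obtained from the registry's Repository method. The
// digest value is an illustrative placeholder.
func exampleTagService(ctx context.Context, repo distribution.Repository) error {
	tags := repo.Tags(ctx)
	desc := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}
	if err := tags.Tag(ctx, "latest", desc); err != nil { // writes .../tags/latest/current/link
		return err
	}
	got, err := tags.Get(ctx, "latest") // follows the current link back to the digest
	if err != nil {
		return err
	}
	_ = got.Digest
	return tags.Untag(ctx, "latest") // removes the whole .../tags/latest directory
}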

View File

@@ -1,208 +0,0 @@
package storage
import (
"testing"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
type tagsTestEnv struct {
ts distribution.TagService
ctx context.Context
}
func testTagStore(t *testing.T) *tagsTestEnv {
ctx := context.Background()
d := inmemory.New()
reg, err := NewRegistry(ctx, d)
if err != nil {
t.Fatal(err)
}
repoRef, _ := reference.ParseNamed("a/b")
repo, err := reg.Repository(ctx, repoRef)
if err != nil {
t.Fatal(err)
}
return &tagsTestEnv{
ctx: ctx,
ts: repo.Tags(ctx),
}
}
func TestTagStoreTag(t *testing.T) {
env := testTagStore(t)
tags := env.ts
ctx := env.ctx
d := distribution.Descriptor{}
err := tags.Tag(ctx, "latest", d)
if err == nil {
t.Errorf("unexpected error putting malformed descriptor : %s", err)
}
d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
err = tags.Tag(ctx, "latest", d)
if err != nil {
t.Error(err)
}
d1, err := tags.Get(ctx, "latest")
if err != nil {
t.Error(err)
}
if d1.Digest != d.Digest {
t.Error("put and get digest differ")
}
// Overwrite existing
d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
err = tags.Tag(ctx, "latest", d)
if err != nil {
t.Error(err)
}
d1, err = tags.Get(ctx, "latest")
if err != nil {
t.Error(err)
}
if d1.Digest != d.Digest {
t.Error("put and get digest differ")
}
}
func TestTagStoreUnTag(t *testing.T) {
env := testTagStore(t)
tags := env.ts
ctx := env.ctx
desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"}
err := tags.Untag(ctx, "latest")
if err == nil {
t.Errorf("Expected error untagging non-existant tag")
}
err = tags.Tag(ctx, "latest", desc)
if err != nil {
t.Error(err)
}
err = tags.Untag(ctx, "latest")
if err != nil {
t.Error(err)
}
_, err = tags.Get(ctx, "latest")
if err == nil {
t.Error("Expected error getting untagged tag")
}
}
func TestTagStoreAll(t *testing.T) {
env := testTagStore(t)
tagStore := env.ts
ctx := env.ctx
alpha := "abcdefghijklmnopqrstuvwxyz"
for i := 0; i < len(alpha); i++ {
tag := alpha[i]
desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"}
err := tagStore.Tag(ctx, string(tag), desc)
if err != nil {
t.Error(err)
}
}
all, err := tagStore.All(ctx)
if err != nil {
t.Error(err)
}
if len(all) != len(alpha) {
t.Errorf("Unexpected count returned from enumerate")
}
for i, c := range all {
if c != string(alpha[i]) {
t.Errorf("unexpected tag in enumerate %s", c)
}
}
removed := "a"
err = tagStore.Untag(ctx, removed)
if err != nil {
t.Error(err)
}
all, err = tagStore.All(ctx)
if err != nil {
t.Error(err)
}
for _, tag := range all {
if tag == removed {
t.Errorf("unexpected tag in enumerate %s", removed)
}
}
}
func TestTagLookup(t *testing.T) {
env := testTagStore(t)
tagStore := env.ts
ctx := env.ctx
descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}
desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"}
tags, err := tagStore.Lookup(ctx, descA)
if err != nil {
t.Fatal(err)
}
if len(tags) != 0 {
t.Fatalf("Lookup returned > 0 tags from empty store")
}
err = tagStore.Tag(ctx, "a", descA)
if err != nil {
t.Fatal(err)
}
err = tagStore.Tag(ctx, "b", descA)
if err != nil {
t.Fatal(err)
}
err = tagStore.Tag(ctx, "0", desc0)
if err != nil {
t.Fatal(err)
}
err = tagStore.Tag(ctx, "1", desc0)
if err != nil {
t.Fatal(err)
}
tags, err = tagStore.Lookup(ctx, descA)
if err != nil {
t.Fatal(err)
}
if len(tags) != 2 {
t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags))
}
tags, err = tagStore.Lookup(ctx, desc0)
if err != nil {
t.Fatal(err)
}
if len(tags) != 2 {
t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags))
}
}

View File

@@ -1,21 +0,0 @@
package storage
import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
)
// exists provides a utility method to test whether or not a path exists in
// the given driver.
func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
if _, err := drv.Stat(ctx, path); err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return false, nil
default:
return false, err
}
}
return true, nil
}
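// A hedged usage sketch: exists folds driver.PathNotFoundError into a plain
// boolean, so callers can branch on presence without their own type switch.
// The path below is illustrative.
func exampleExists(ctx context.Context, drv driver.StorageDriver) (bool, error) {
	// false with a nil error means "cleanly absent"; a non-nil error is a
	// real backend failure.
	return exists(ctx, drv, "/docker/registry/v2/repositories/foo/bar/_manifests/tags")
}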

View File

@@ -1,65 +0,0 @@
package storage
import (
"path"
"github.com/docker/distribution/context"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/registry/storage/driver"
)
// vacuum contains functions for cleaning up repositories and blobs
// These functions will only reliably work on strongly consistent
// storage systems.
// https://en.wikipedia.org/wiki/Consistency_model
// NewVacuum creates a new Vacuum
func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum {
return Vacuum{
ctx: ctx,
driver: driver,
}
}
// Vacuum removes content from the filesystem
type Vacuum struct {
driver driver.StorageDriver
ctx context.Context
}
// RemoveBlob removes a blob from the filesystem
func (v Vacuum) RemoveBlob(dgst string) error {
d, err := digest.ParseDigest(dgst)
if err != nil {
return err
}
blobPath, err := pathFor(blobDataPathSpec{digest: d})
if err != nil {
return err
}
context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath)
err = v.driver.Delete(v.ctx, blobPath)
if err != nil {
return err
}
return nil
}
// RemoveRepository removes a repository directory from the
// filesystem
func (v Vacuum) RemoveRepository(repoName string) error {
rootForRepository, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return err
}
repoDir := path.Join(rootForRepository, repoName)
context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir)
err = v.driver.Delete(v.ctx, repoDir)
if err != nil {
return err
}
return nil
}
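// A hedged usage sketch: remove a repository and a blob. Both values are
// illustrative; RemoveBlob validates the digest string before touching the
// backend.
func exampleVacuum(ctx context.Context, d driver.StorageDriver) error {
	v := NewVacuum(ctx, d)
	if err := v.RemoveRepository("foo/bar"); err != nil {
		return err
	}
	return v.RemoveBlob("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
}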

View File

@@ -1,59 +0,0 @@
package storage
import (
"errors"
"fmt"
"sort"
"github.com/docker/distribution/context"
storageDriver "github.com/docker/distribution/registry/storage/driver"
)
// ErrSkipDir is used as a return value from a WalkFn to indicate that
// the directory named in the call is to be skipped. It is not returned
// as an error by any function.
var ErrSkipDir = errors.New("skip this directory")
// WalkFn is called once per file by Walk.
// If the returned error is ErrSkipDir and fileInfo refers
// to a directory, the directory will not be entered and Walk
// will continue the traversal. Otherwise, Walk stops and returns the error.
type WalkFn func(fileInfo storageDriver.FileInfo) error
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file
func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error {
children, err := driver.List(ctx, from)
if err != nil {
return err
}
sort.Stable(sort.StringSlice(children))
for _, child := range children {
// TODO(stevvooe): Calling driver.Stat for every entry is quite
// expensive when running against backends with a slow Stat
// implementation, such as s3. This is very likely a serious
// performance bottleneck.
fileInfo, err := driver.Stat(ctx, child)
if err != nil {
return err
}
err = f(fileInfo)
skipDir := (err == ErrSkipDir)
if err != nil && !skipDir {
return err
}
if fileInfo.IsDir() && !skipDir {
if err := Walk(ctx, driver, child, f); err != nil {
return err
}
}
}
return nil
}
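// A hedged usage sketch: count regular files under an illustrative root.
// Returning ErrSkipDir from the callback would prune a whole directory
// instead of descending into it.
func exampleWalkCount(ctx context.Context, d storageDriver.StorageDriver) (int, error) {
	count := 0
	err := Walk(ctx, d, "/docker/registry/v2/repositories", func(fi storageDriver.FileInfo) error {
		if !fi.IsDir() {
			count++
		}
		return nil
	})
	return count, err
}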
// pushError formats an error type given a path and an error
// and pushes it to a slice of errors
func pushError(errors []error, path string, err error) []error {
return append(errors, fmt.Errorf("%s: %s", path, err))
}

View File

@@ -1,152 +0,0 @@
package storage
import (
"fmt"
"sort"
"testing"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/inmemory"
)
func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) {
d := inmemory.New()
ctx := context.Background()
expected := map[string]string{
"/a": "dir",
"/a/b": "dir",
"/a/b/c": "dir",
"/a/b/c/d": "file",
"/a/b/c/e": "file",
"/a/b/f": "dir",
"/a/b/f/g": "file",
"/a/b/f/h": "file",
"/a/b/f/i": "file",
"/z": "dir",
"/z/y": "file",
}
for p, typ := range expected {
if typ != "file" {
continue
}
if err := d.PutContent(ctx, p, []byte(p)); err != nil {
t.Fatalf("unable to put content into fixture: %v", err)
}
}
return d, expected, ctx
}
func TestWalkErrors(t *testing.T) {
d, expected, ctx := testFS(t)
fileCount := len(expected)
err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error {
return nil
})
if err == nil {
t.Error("Expected invalid root err")
}
errEarlyExpected := fmt.Errorf("Early termination")
err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
// error on the 2nd file
if fileInfo.Path() == "/a/b" {
return errEarlyExpected
}
delete(expected, fileInfo.Path())
return nil
})
if len(expected) != fileCount-1 {
t.Error("Walk failed to terminate with error")
}
if err != errEarlyExpected {
if err == nil {
t.Fatalf("expected an error due to early termination")
} else {
t.Error(err.Error())
}
}
err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error {
return nil
})
if err == nil {
t.Errorf("Expected missing file err")
}
}
func TestWalk(t *testing.T) {
d, expected, ctx := testFS(t)
var traversed []string
err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
filePath := fileInfo.Path()
filetype, ok := expected[filePath]
if !ok {
t.Fatalf("Unexpected file in walk: %q", filePath)
}
if fileInfo.IsDir() {
if filetype != "dir" {
t.Errorf("Unexpected file type: %q", filePath)
}
} else {
if filetype != "file" {
t.Errorf("Unexpected file type: %q", filePath)
}
// each file has its own path as the contents. If the length
// doesn't match the path length, fail.
if fileInfo.Size() != int64(len(fileInfo.Path())) {
t.Fatalf("unexpected size for %q: %v != %v",
fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path()))
}
}
delete(expected, filePath)
traversed = append(traversed, filePath)
return nil
})
if len(expected) > 0 {
t.Errorf("Missed files in walk: %q", expected)
}
if !sort.StringsAreSorted(traversed) {
t.Errorf("result should be sorted: %v", traversed)
}
if err != nil {
t.Fatalf(err.Error())
}
}
func TestWalkSkipDir(t *testing.T) {
d, expected, ctx := testFS(t)
err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
filePath := fileInfo.Path()
if filePath == "/a/b" {
// skip processing /a/b/c and /a/b/c/d
return ErrSkipDir
}
delete(expected, filePath)
return nil
})
if err != nil {
t.Fatalf(err.Error())
}
if _, ok := expected["/a/b/c"]; !ok {
t.Errorf("/a/b/c not skipped")
}
if _, ok := expected["/a/b/c/d"]; !ok {
t.Errorf("/a/b/c/d not skipped")
}
if _, ok := expected["/a/b/c/e"]; !ok {
t.Errorf("/a/b/c/e not skipped")
}
}