diff --git a/Dockerfile b/Dockerfile
index 1040ee1a..e51a7ebf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,9 @@
 FROM fedora
 RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man \
+    # storage deps
+    btrfs-progs-devel \
+    device-mapper-devel \
     # gpgme bindings deps
     libassuan-devel gpgme-devel \
     gnupg \
diff --git a/Dockerfile.build b/Dockerfile.build
index 5c828e9e..7c89a84c 100644
--- a/Dockerfile.build
+++ b/Dockerfile.build
@@ -1,5 +1,5 @@
 FROM ubuntu:16.04
 RUN apt-get update
-RUN apt-get install -y golang git-core libgpgme11-dev go-md2man
+RUN apt-get install -y golang btrfs-tools git-core libdevmapper-dev libgpgme11-dev go-md2man
 ENV GOPATH=/
 WORKDIR /src/github.com/projectatomic/skopeo
diff --git a/Makefile b/Makefile
index 2720cb8e..26cffb7b 100644
--- a/Makefile
+++ b/Makefile
@@ -32,6 +32,11 @@ GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
 
 MANPAGES_MD = $(wildcard docs/*.md)
 
+BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh)
+LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
+LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG)
+BUILDTAGS += $(LOCAL_BUILD_TAGS)
+
 # make all DEBUG=1
 # Note: Uses the -N -l go compiler options to disable compiler optimizations
 # and inlining. Using these build options allows you to subsequently
diff --git a/hack/btrfs_tag.sh b/hack/btrfs_tag.sh
new file mode 100755
index 00000000..cc48504a
--- /dev/null
+++ b/hack/btrfs_tag.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+cc -E - > /dev/null 2> /dev/null << EOF
+#include <btrfs/version.h>
+EOF
+if test $? -ne 0 ; then
+	echo btrfs_noversion
+fi
diff --git a/hack/libdm_tag.sh b/hack/libdm_tag.sh
new file mode 100755
index 00000000..476545e3
--- /dev/null
+++ b/hack/libdm_tag.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+tmpdir="$PWD/tmp.$RANDOM"
+mkdir -p "$tmpdir"
+trap 'rm -fr "$tmpdir"' EXIT
+cc -c -o "$tmpdir"/libdm_tag.o -x c - > /dev/null 2> /dev/null << EOF
+#include <libdevmapper.h>
+int main() {
+	struct dm_task *task;
+	dm_task_deferred_remove(task);
+	return 0;
+}
+EOF
+if test $? -ne 0 ; then
+	echo libdm_no_deferred_remove
+fi
diff --git a/hack/vendor.sh b/hack/vendor.sh
index b2f3dc8f..f21a250a 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -8,6 +8,7 @@ source 'hack/.vendor-helpers.sh'
 clone git github.com/urfave/cli v1.17.0
 clone git github.com/containers/image master
 clone git gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
+clone git github.com/containers/storage master
 clone git github.com/Sirupsen/logrus v0.10.0
 clone git github.com/go-check/check v1
 clone git github.com/stretchr/testify v1.1.3
@@ -34,6 +35,9 @@ clone git k8s.io/kubernetes 4a3f9c5b19c7ff804cbc1bf37a15c044ca5d2353 https://git
 clone git github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
 clone git gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
 clone git github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
+# containers/storage's dependencies that aren't already being pulled in
+clone git github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
+clone git github.com/pborman/uuid v1.0
 
 clean
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index e8a9d42d..555e378d 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -32,6 +32,21 @@ type digestingReader struct {
 	validationFailed bool
 }
 
+// imageCopier allows us to keep track of diffID values for the blobs that
+// we're copying between images, and to cache other information that might
+// allow us to take some shortcuts.
+type imageCopier struct {
+	copiedBlobs       map[digest.Digest]digest.Digest
+	cachedDiffIDs     map[digest.Digest]digest.Digest
+	manifestUpdates   *types.ManifestUpdateOptions
+	dest              types.ImageDestination
+	src               types.Image
+	rawSource         types.ImageSource
+	diffIDsAreNeeded  bool
+	canModifyManifest bool
+	reportWriter      io.Writer
+}
+
 // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
 // and set validationFailed to true if the source stream does not match expectedDigest.
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
@@ -147,7 +162,20 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
 		return err
 	}
 
-	if err := copyLayers(&manifestUpdates, dest, src, rawSource, canModifyManifest, reportWriter); err != nil {
+	// If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here.
+ ic := imageCopier{ + copiedBlobs: make(map[digest.Digest]digest.Digest), + cachedDiffIDs: make(map[digest.Digest]digest.Digest), + manifestUpdates: &manifestUpdates, + dest: dest, + src: src, + rawSource: rawSource, + diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates), + canModifyManifest: canModifyManifest, + reportWriter: reportWriter, + } + + if err := ic.copyLayers(); err != nil { return err } @@ -167,7 +195,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe return fmt.Errorf("Error reading manifest: %v", err) } - if err := copyConfig(dest, pendingImage, reportWriter); err != nil { + if err := ic.copyConfig(pendingImage); err != nil { return err } @@ -206,57 +234,41 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe return nil } -// copyLayers copies layers from src/rawSource to dest, using and updating manifestUpdates if necessary and canModifyManifest. -// If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time this function is called. -func copyLayers(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, rawSource types.ImageSource, - canModifyManifest bool, reportWriter io.Writer) error { - type copiedLayer struct { - blobInfo types.BlobInfo - diffID digest.Digest - } - - diffIDsAreNeeded := src.UpdatedImageNeedsLayerDiffIDs(*manifestUpdates) - - srcInfos := src.LayerInfos() +// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +func (ic *imageCopier) copyLayers() error { + srcInfos := ic.src.LayerInfos() destInfos := []types.BlobInfo{} diffIDs := []digest.Digest{} - copiedLayers := map[digest.Digest]copiedLayer{} for _, srcLayer := range srcInfos { - cl, ok := copiedLayers[srcLayer.Digest] - if !ok { - var ( - destInfo types.BlobInfo - diffID digest.Digest - err error - ) - if dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { - // DiffIDs are, currently, needed only when converting from schema1. - // In which case src.LayerInfos will not have URLs because schema1 - // does not support them. - if diffIDsAreNeeded { - return errors.New("getting DiffID for foreign layers is unimplemented") - } - destInfo = srcLayer - fmt.Fprintf(reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, dest.Reference().Transport().Name()) - } else { - fmt.Fprintf(reportWriter, "Copying blob %s\n", srcLayer.Digest) - destInfo, diffID, err = copyLayer(dest, rawSource, srcLayer, diffIDsAreNeeded, canModifyManifest, reportWriter) - if err != nil { - return err - } + var ( + destInfo types.BlobInfo + diffID digest.Digest + err error + ) + if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. 
+ if ic.diffIDsAreNeeded { + return errors.New("getting DiffID for foreign layers is unimplemented") + } + destInfo = srcLayer + fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name()) + } else { + destInfo, diffID, err = ic.copyLayer(srcLayer) + if err != nil { + return err } - cl = copiedLayer{blobInfo: destInfo, diffID: diffID} - copiedLayers[srcLayer.Digest] = cl } - destInfos = append(destInfos, cl.blobInfo) - diffIDs = append(diffIDs, cl.diffID) + destInfos = append(destInfos, destInfo) + diffIDs = append(diffIDs, diffID) } - manifestUpdates.InformationOnly.LayerInfos = destInfos - if diffIDsAreNeeded { - manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs } if layerDigestsDiffer(srcInfos, destInfos) { - manifestUpdates.LayerInfos = destInfos + ic.manifestUpdates.LayerInfos = destInfos } return nil } @@ -275,15 +287,15 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { } // copyConfig copies config.json, if any, from src to dest. -func copyConfig(dest types.ImageDestination, src types.Image, reportWriter io.Writer) error { +func (ic *imageCopier) copyConfig(src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - fmt.Fprintf(reportWriter, "Copying config %s\n", srcInfo.Digest) + fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest) configBlob, err := src.ConfigBlob() if err != nil { return fmt.Errorf("Error reading config blob %s: %v", srcInfo.Digest, err) } - destInfo, err := copyBlobFromStream(dest, bytes.NewReader(configBlob), srcInfo, nil, false, reportWriter) + destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) if err != nil { return err } @@ -303,16 +315,40 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types.BlobInfo, - diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, digest.Digest, error) { - srcStream, srcBlobSize, err := src.GetBlob(srcInfo) +func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { + // Check if we already have a blob with this digest + haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo) + if err != nil && err != types.ErrBlobNotFound { + return types.BlobInfo{}, "", fmt.Errorf("Error checking for blob %s at destination: %v", srcInfo.Digest, err) + } + // If we already have a cached diffID for this blob, we don't need to compute it + diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "") + // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again + if haveBlob && !diffIDIsNeeded { + // Check the blob sizes match, if we were given a size this time + if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize { + return types.BlobInfo{}, "", fmt.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size) + } + srcInfo.Size = extantBlobSize + // Tell the image destination that this blob's delta is being applied again. 
For some image destinations, this can be faster than using GetBlob/PutBlob + blobinfo, err := ic.dest.ReapplyBlob(srcInfo) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("Error reapplying blob %s at destination: %v", srcInfo.Digest, err) + } + fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest) + return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err + } + + // Fallback: copy the layer, computing the diffID if we need to do so + fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest) + srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err) } defer srcStream.Close() - blobInfo, diffIDChan, err := copyLayerFromStream(dest, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, - diffIDIsNeeded, canCompress, reportWriter) + blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, + diffIDIsNeeded) if err != nil { return types.BlobInfo{}, "", err } @@ -323,6 +359,7 @@ func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types return types.BlobInfo{}, "", fmt.Errorf("Error computing layer DiffID: %v", diffIDResult.err) } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest } return blobInfo, diffIDResult.digest, nil } @@ -331,8 +368,8 @@ func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types // it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, // perhaps compressing the stream if canCompress, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. -func copyLayerFromStream(dest types.ImageDestination, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, <-chan diffIDResult, error) { +func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo, + diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(decompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -356,8 +393,7 @@ func copyLayerFromStream(dest types.ImageDestination, srcStream io.Reader, srcIn return pipeWriter } } - blobInfo, err := copyBlobFromStream(dest, srcStream, srcInfo, - getDiffIDRecorder, canCompress, reportWriter) // Sets err to nil on success + blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -391,9 +427,9 @@ func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Dige // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, // perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied blob. 
-func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor decompressorFunc) io.Writer, canCompress bool, - reportWriter io.Writer) (types.BlobInfo, error) { +func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor decompressorFunc) io.Writer, + canCompress bool) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. // === Input: srcStream @@ -419,13 +455,13 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf // === Report progress using a pb.Reader. bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = reportWriter + bar.Output = ic.reportWriter bar.SetMaxWidth(80) bar.ShowTimeLeft = false bar.ShowPercent = false bar.Start() destStream = bar.NewProxyReader(destStream) - defer fmt.Fprint(reportWriter, "\n") + defer fmt.Fprint(ic.reportWriter, "\n") // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. @@ -436,7 +472,7 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf // === Compress the layer if it is uncompressed and compression is desired var inputInfo types.BlobInfo - if !canCompress || isCompressed || !dest.ShouldCompressLayers() { + if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() { logrus.Debugf("Using original blob without modification") inputInfo = srcInfo } else { @@ -454,7 +490,7 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf } // === Finally, send the layer stream to dest. 
-	uploadedInfo, err := dest.PutBlob(destStream, inputInfo)
+	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
 	if err != nil {
 		return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err)
 	}
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
index 6d76d30e..2a8d102e 100644
--- a/vendor/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -94,6 +94,25 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }
 
+func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, -1, types.ErrBlobNotFound
+	}
+	if err != nil {
+		return false, -1, err
+	}
+	return true, finfo.Size(), nil
+}
+
+func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func (d *dirImageDestination) PutManifest(manifest []byte) error {
 	return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
 }
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
index 70db947f..7c4576dd 100644
--- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go
@@ -29,7 +29,8 @@ type daemonImageDestination struct {
 	writer *io.PipeWriter
 	tar    *tar.Writer
 	// Other state
-	committed bool // writer has been closed
+	committed bool                             // writer has been closed
+	blobs     map[digest.Digest]types.BlobInfo // list of already-sent blobs
 }
 
 // newImageDestination returns a types.ImageDestination for the specified image reference.
@@ -62,6 +63,7 @@
 		writer:    writer,
 		tar:       tar.NewWriter(writer),
 		committed: false,
+		blobs:     make(map[digest.Digest]types.BlobInfo),
 	}, nil
 }
 
@@ -142,8 +144,8 @@ func (d *daemonImageDestination) AcceptsForeignLayerURLs() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-	if inputInfo.Digest.String() == "" {
-		return types.BlobInfo{}, fmt.Errorf(`"Can not stream a blob with unknown digest to "docker-daemon:"`)
+	if ok, size, err := d.HasBlob(inputInfo); err == nil && ok {
+		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
 	}
 	if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
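The HasBlob/ReapplyBlob pair that every destination gains in this patch is what lets the copy code skip re-fetching blobs the destination already holds. A minimal caller-side sketch of that contract follows; the helper name putBlobIfMissing and its wiring are illustrative assumptions, not part of this patch:

```go
package example

import "github.com/containers/image/types"

// putBlobIfMissing is an illustrative helper showing the intended calling
// convention: probe with HasBlob, reuse via ReapplyBlob, fall back to PutBlob.
func putBlobIfMissing(dest types.ImageDestination, src types.ImageSource, info types.BlobInfo) (types.BlobInfo, error) {
	ok, size, err := dest.HasBlob(info)
	if err != nil && err != types.ErrBlobNotFound {
		return types.BlobInfo{}, err
	}
	if ok {
		// The destination already has the blob; ReapplyBlob may only be
		// called after HasBlob has returned true for the same digest.
		info.Size = size
		return dest.ReapplyBlob(info)
	}
	// Not present yet: stream it the usual way.
	stream, _, err := src.GetBlob(info)
	if err != nil {
		return types.BlobInfo{}, err
	}
	defer stream.Close()
	return dest.PutBlob(stream, info)
}
```

This mirrors the flow that copyLayer() implements above, with diffID caching layered on top.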
@@ -173,9 +175,24 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
 		return types.BlobInfo{}, err
 	}
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
 	return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
 }
 
+func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := d.blobs[info.Digest]; ok {
+		return true, blob.Size, nil
+	}
+	return false, -1, types.ErrBlobNotFound
+}
+
+func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func (d *daemonImageDestination) PutManifest(m []byte) error {
 	var man schema2Manifest
 	if err := json.Unmarshal(m, &man); err != nil {
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 48231d69..35ecb96e 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -17,7 +17,7 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/types"
-	"github.com/docker/docker/pkg/homedir"
+	"github.com/containers/storage/pkg/homedir"
 	"github.com/docker/go-connections/sockets"
 	"github.com/docker/go-connections/tlsconfig"
 )
@@ -428,7 +428,7 @@ func (c *dockerClient) ping() (*pingResponse, error) {
 	}
 	if err != nil {
 		err = fmt.Errorf("pinging docker registry returned %+v", err)
-		if c.ctx.DockerDisableV1Ping {
+		if c.ctx != nil && c.ctx.DockerDisableV1Ping {
 			return nil, err
 		}
 		// best effort to understand if we're talking to a V1 registry
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 343079af..54df01d7 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -163,6 +163,39 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
 }
 
+func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), info.Digest.String())
+
+	logrus.Debugf("Checking %s", checkURL)
+	res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
+	if err != nil {
+		return false, -1, err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusOK:
+		logrus.Debugf("... already exists")
+		return true, getBlobSize(res), nil
+	case http.StatusUnauthorized:
+		logrus.Debugf("... not authorized")
+		return false, -1, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
+	case http.StatusNotFound:
+		logrus.Debugf("... not present")
+		return false, -1, types.ErrBlobNotFound
+	default:
+		logrus.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode))
+	}
+	logrus.Debugf("... failed, status %d, ignoring", res.StatusCode)
+	return false, -1, types.ErrBlobNotFound
+}
+
+func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func (d *dockerImageDestination) PutManifest(m []byte) error {
 	digest, err := manifest.Digest(m)
 	if err != nil {
@@ -209,8 +242,8 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
 		return fmt.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
 	}
 
-	// FIXME: This assumption that signatures are stored after the manifest rather breaks the model.
 	if d.manifestDigest.String() == "" {
+		// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
 		return fmt.Errorf("Unknown manifest digest, can't add signatures")
 	}
diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go
index 568f855a..9404db82 100644
--- a/vendor/github.com/containers/image/image/memory.go
+++ b/vendor/github.com/containers/image/image/memory.go
@@ -35,6 +35,15 @@ func (i *memoryImage) Reference() types.ImageReference {
 func (i *memoryImage) Close() {
 }
 
+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+	s, err := i.serialize()
+	if err != nil {
+		return -1, err
+	}
+	return int64(len(s)), nil
+}
+
 // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
 func (i *memoryImage) Manifest() ([]byte, string, error) {
 	if i.serializedManifest == nil {
diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go
index a7c25ab1..ef35b3c3 100644
--- a/vendor/github.com/containers/image/image/sourced.go
+++ b/vendor/github.com/containers/image/image/sourced.go
@@ -71,6 +71,11 @@ func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
 	}, nil
 }
 
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *sourcedImage) Size() (int64, error) {
+	return -1, nil
+}
+
 // Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
 func (i *sourcedImage) Manifest() ([]byte, string, error) {
 	return i.manifestBlob, i.manifestMIMEType, nil
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
index 55581c7e..1c849e0d 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -112,6 +112,28 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }
 
+func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath, err := d.ref.blobPath(info.Digest)
+	if err != nil {
+		return false, -1, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, -1, types.ErrBlobNotFound
+	}
+	if err != nil {
+		return false, -1, err
+	}
+	return true, finfo.Size(), nil
+}
+
+func (d *ociImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func createManifest(m []byte) ([]byte, string, error) {
 	om := imgspecv1.Manifest{}
 	mt := manifest.GuessMIMEType(m)
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index 65b80f92..a8f66681 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -366,6 +366,14 @@ func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.Bl
 	return d.docker.PutBlob(stream, inputInfo)
 }
 
+func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	return d.docker.HasBlob(info)
+}
+
+func (d *openshiftImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return d.docker.ReapplyBlob(info)
+}
+
 func (d *openshiftImageDestination) PutManifest(m []byte) error {
 	manifestDigest, err := manifest.Digest(m)
 	if err != nil {
diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go
index 2bd95569..adac6888 100644
--- a/vendor/github.com/containers/image/signature/policy_config.go
+++ b/vendor/github.com/containers/image/signature/policy_config.go
@@ -41,8 +41,6 @@ func (err InvalidPolicyFormatError) Error() string {
 	return string(err)
 }
 
-// FIXME: NewDefaultPolicy, from default file (or environment if trusted?)
-
 // DefaultPolicy returns the default policy of the system.
 // Most applications should be using this method to get the policy configured
 // by the system administrator.
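With memoryImage computing a size by serializing itself and sourcedImage answering -1, images now expose a best-effort Size() query; the storage transport added below returns the real stored size. A hedged illustration of the convention, assuming types.Image now includes Size() as these implementations imply:

```go
package example

import (
	"fmt"

	"github.com/containers/image/types"
)

// reportSize is illustrative only: -1 means the transport does not record the
// stored size, while any other value is the size of the image as stored.
func reportSize(img types.Image) {
	size, err := img.Size()
	if err != nil {
		fmt.Printf("could not determine size: %v\n", err)
		return
	}
	if size == -1 {
		fmt.Println("size is not recorded by this transport")
		return
	}
	fmt.Printf("image occupies %d bytes as stored\n", size)
}
```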
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go new file mode 100644 index 00000000..132c5ad5 --- /dev/null +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -0,0 +1,570 @@ +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/image" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/storage" + ddigest "github.com/docker/distribution/digest" +) + +var ( + // ErrBlobDigestMismatch is returned when PutBlob() is given a blob + // with a digest-based name that doesn't match its contents. + ErrBlobDigestMismatch = errors.New("blob digest mismatch") + // ErrBlobSizeMismatch is returned when PutBlob() is given a blob + // with an expected size that doesn't match the reader. + ErrBlobSizeMismatch = errors.New("blob size mismatch") + // ErrNoManifestLists is returned when GetTargetManifest() is + // called. + ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") + // ErrNoSuchImage is returned when we attempt to access an image which + // doesn't exist in the storage area. + ErrNoSuchImage = storage.ErrNotAnImage +) + +type storageImageSource struct { + imageRef storageReference + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice +} + +type storageImageDestination struct { + imageRef storageReference + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary + Manifest []byte `json:"-"` // Manifest contents, temporary + Signatures []byte `json:"-"` // Signature contents, temporary + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice +} + +type storageLayerMetadata struct { + Digest string `json:"digest,omitempty"` + Size int64 `json:"size"` + CompressedSize int64 `json:"compressed-size,omitempty"` +} + +type storageImage struct { + types.Image + size int64 +} + +// newImageSource sets us up to read out an image, which needs to already exist. 
+func newImageSource(imageRef storageReference) (*storageImageSource, error) { + id := imageRef.resolveID() + if id == "" { + logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport()) + return nil, ErrNoSuchImage + } + img, err := imageRef.transport.store.GetImage(id) + if err != nil { + return nil, fmt.Errorf("error reading image %q: %v", id, err) + } + image := &storageImageSource{ + imageRef: imageRef, + Created: time.Now(), + ID: img.ID, + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + LayerPosition: make(map[ddigest.Digest]int), + SignatureSizes: []int{}, + } + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, fmt.Errorf("error decoding metadata for source image: %v", err) + } + return image, nil +} + +// newImageDestination sets us up to write a new image. +func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { + image := &storageImageDestination{ + imageRef: imageRef, + Tag: imageRef.reference, + Created: time.Now(), + ID: imageRef.id, + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + BlobData: make(map[ddigest.Digest][]byte), + SignatureSizes: []int{}, + } + return image, nil +} + +func (s storageImageSource) Reference() types.ImageReference { + return s.imageRef +} + +func (s storageImageDestination) Reference() types.ImageReference { + return s.imageRef +} + +func (s storageImageSource) Close() { +} + +func (s storageImageDestination) Close() { +} + +func (s storageImageDestination) ShouldCompressLayers() bool { + // We ultimately have to decompress layers to populate trees on disk, + // so callers shouldn't bother compressing them before handing them to + // us, if they're not already compressed. + return false +} + +// PutBlob is used to both store filesystem layers and binary data that is part +// of the image. Filesystem layers are assumed to be imported in order, as +// that is required by some of the underlying storage drivers. +func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { + blobSize := int64(-1) + digest := blobinfo.Digest + errorBlobInfo := types.BlobInfo{ + Digest: "", + Size: -1, + } + // Try to read an initial snippet of the blob. + header := make([]byte, 10240) + n, err := stream.Read(header) + if err != nil && err != io.EOF { + return errorBlobInfo, err + } + // Set up to read the whole blob (the initial snippet, plus the rest) + // while digesting it with either the default, or the passed-in digest, + // if one was specified. + hasher := ddigest.Canonical.New() + if digest.Validate() == nil { + if a := digest.Algorithm(); a.Available() { + hasher = a.New() + } + } + hash := "" + counter := ioutils.NewWriteCounter(hasher.Hash()) + defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream) + multi := io.TeeReader(defragmented, counter) + if (n > 0) && archive.IsArchive(header[:n]) { + // It's a filesystem layer. If it's not the first one in the + // image, we assume that the most recently added layer is its + // parent. + parentLayer := "" + for _, blob := range s.BlobList { + if layerList, ok := s.Layers[blob.Digest]; ok { + parentLayer = layerList[len(layerList)-1] + } + } + // If we have an expected content digest, generate a layer ID + // based on the parent's ID and the expected content digest. 
+ id := "" + if digest.Validate() == nil { + id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex() + } + // Attempt to create the identified layer and import its contents. + layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) + if err != nil && err != storage.ErrDuplicateID { + logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) + return errorBlobInfo, err + } + if err == storage.ErrDuplicateID { + // We specified an ID, and there's already a layer with + // the same ID. Drain the input so that we can look at + // its length and digest. + _, err := io.Copy(ioutil.Discard, multi) + if err != nil && err != io.EOF { + logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err) + return errorBlobInfo, err + } + hash = hasher.Digest().String() + } else { + // Applied the layer with the specified ID. Note the + // size info and computed digest. + hash = hasher.Digest().String() + layerMeta := storageLayerMetadata{ + Digest: hash, + CompressedSize: counter.Count, + Size: uncompressedSize, + } + if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil { + s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata)) + } + // Hang on to the new layer's ID. + id = layer.ID + } + blobSize = counter.Count + // Check if the size looks right. + if blobinfo.Size >= 0 && blobSize != blobinfo.Size { + logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size) + if layer != nil { + // Something's wrong; delete the newly-created layer. + s.imageRef.transport.store.DeleteLayer(layer.ID) + } + return errorBlobInfo, ErrBlobSizeMismatch + } + // If the content digest was specified, verify it. + if digest.Validate() == nil && digest.String() != hash { + logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) + if layer != nil { + // Something's wrong; delete the newly-created layer. + s.imageRef.transport.store.DeleteLayer(layer.ID) + } + return errorBlobInfo, ErrBlobDigestMismatch + } + // If we didn't get a digest, construct one. + if digest == "" { + digest = ddigest.Digest(hash) + } + // Record that this layer blob is a layer, and the layer ID it + // ended up having. This is a list, in case the same blob is + // being applied more than once. + s.Layers[digest] = append(s.Layers[digest], id) + s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize}) + if layer != nil { + logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) + } else { + logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) + } + } else { + // It's just data. Finish scanning it in, check that our + // computed digest matches the passed-in digest, and store it, + // but leave it out of the blob-to-layer-ID map so that we can + // tell that it's not a layer. + blob, err := ioutil.ReadAll(multi) + if err != nil && err != io.EOF { + return errorBlobInfo, err + } + blobSize = int64(len(blob)) + hash = hasher.Digest().String() + if blobinfo.Size >= 0 && blobSize != blobinfo.Size { + logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size) + return errorBlobInfo, ErrBlobSizeMismatch + } + // If we were given a digest, verify that the content matches + // it. 
+		if digest.Validate() == nil && digest.String() != hash {
+			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
+			return errorBlobInfo, ErrBlobDigestMismatch
+		}
+		// If we didn't get a digest, construct one.
+		if digest == "" {
+			digest = ddigest.Digest(hash)
+		}
+		// Save the blob for when we Commit().
+		s.BlobData[digest] = blob
+		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
+		logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
+	}
+	return types.BlobInfo{
+		Digest: digest,
+		Size:   blobSize,
+	}, nil
+}
+
+func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+	if blobinfo.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	for _, blob := range s.BlobList {
+		if blob.Digest == blobinfo.Digest {
+			return true, blob.Size, nil
+		}
+	}
+	return false, -1, types.ErrBlobNotFound
+}
+
+func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
+	err := blobinfo.Digest.Validate()
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 {
+		b, err := s.imageRef.transport.store.GetImageBigData(s.ID, blobinfo.Digest.String())
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil
+	}
+	layerList := s.Layers[blobinfo.Digest]
+	rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1])
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	return s.PutBlob(rc, blobinfo)
+}
+
+func (s *storageImageDestination) Commit() error {
+	// Create the image record.
+	lastLayer := ""
+	for _, blob := range s.BlobList {
+		if layerList, ok := s.Layers[blob.Digest]; ok {
+			lastLayer = layerList[len(layerList)-1]
+		}
+	}
+	img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
+	if err != nil {
+		logrus.Debugf("error creating image: %q", err)
+		return err
+	}
+	logrus.Debugf("created new image ID %q", img.ID)
+	s.ID = img.ID
+	if s.Tag != "" {
+		// We have a name to set, so move the name to this image.
+		if err := s.imageRef.transport.store.SetNames(img.ID, []string{s.Tag}); err != nil {
+			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+			}
+			logrus.Debugf("error setting names on image %q: %v", img.ID, err)
+			return err
+		}
+		logrus.Debugf("set name of image %q to %q", img.ID, s.Tag)
+	}
+	// Save the data blobs to disk, and drop their contents from memory.
+	keys := []ddigest.Digest{}
+	for k, v := range s.BlobData {
+		if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil {
+			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+			}
+			logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err)
+			return err
+		}
+		keys = append(keys, k)
+	}
+	for _, key := range keys {
+		delete(s.BlobData, key)
+	}
+	// Save the manifest, if we have one.
+ if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return err + } + // Save the signatures, if we have any. + if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err + } + // Save our metadata. + metadata, err := json.Marshal(s) + if err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) + return err + } + if len(metadata) != 0 { + if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return err + } + logrus.Debugf("saved image metadata %q", string(metadata)) + } + return nil +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return nil +} + +func (s *storageImageDestination) PutManifest(manifest []byte) error { + s.Manifest = make([]byte, len(manifest)) + copy(s.Manifest, manifest) + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to +// return data that was previously supplied to PutSignatures(). +func (s *storageImageDestination) SupportsSignatures() error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + s.Signatures = sigblob + s.SignatureSizes = sizes + return nil +} + +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info) + return rc, n, err +} + +func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { + b, err := s.imageRef.transport.store.GetImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // If the blob was "put" more than once, we have multiple layer IDs + // which should all produce the same diff. 
For the sake of tests that
+	// want to make sure we created different layers each time the blob was
+	// "put", though, cycle through the layers.
+	layerList := s.Layers[info.Digest]
+	position, ok := s.LayerPosition[info.Digest]
+	if !ok {
+		position = 0
+	}
+	s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
+	logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
+	rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
+	return rc, n, layerList[position], err
+}
+
+func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
+	layer, err := store.GetLayer(layerID)
+	if err != nil {
+		return nil, -1, err
+	}
+	layerMeta := storageLayerMetadata{
+		CompressedSize: -1,
+	}
+	if layer.Metadata != "" {
+		if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+			return nil, -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
+		}
+	}
+	if layerMeta.CompressedSize <= 0 {
+		n = -1
+	} else {
+		n = layerMeta.CompressedSize
+	}
+	diff, err := store.Diff("", layer.ID)
+	if err != nil {
+		return nil, -1, err
+	}
+	return diff, n, nil
+}
+
+func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
+	manifestBlob, err = s.imageRef.transport.store.GetImageBigData(s.ID, "manifest")
+	return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
+}
+
+func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+	return nil, "", ErrNoManifestLists
+}
+
+func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) {
+	var offset int
+	signature, err := s.imageRef.transport.store.GetImageBigData(s.ID, "signatures")
+	if err != nil {
+		return nil, err
+	}
+	sigslice := [][]byte{}
+	for _, length := range s.SignatureSizes {
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+func (s *storageImageSource) getSize() (int64, error) {
+	var sum int64
+	names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
+	if err != nil {
+		return -1, fmt.Errorf("error reading image %q: %v", s.imageRef.id, err)
+	}
+	for _, name := range names {
+		bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name)
+		if err != nil {
+			return -1, fmt.Errorf("error reading data blob size %q for %q: %v", name, s.imageRef.id, err)
+		}
+		sum += bigSize
+	}
+	for _, sigSize := range s.SignatureSizes {
+		sum += int64(sigSize)
+	}
+	for _, layerList := range s.Layers {
+		for _, layerID := range layerList {
+			layer, err := s.imageRef.transport.store.GetLayer(layerID)
+			if err != nil {
+				return -1, err
+			}
+			layerMeta := storageLayerMetadata{
+				Size: -1,
+			}
+			if layer.Metadata != "" {
+				if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+					return -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
+				}
+			}
+			if layerMeta.Size < 0 {
+				return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+			}
+			sum += layerMeta.Size
+		}
+	}
+	return sum, nil
+}
+
+func (s *storageImage) Size() (int64, error) {
+	return s.size, nil
+}
+
+// newImage creates an image that also knows its size
+func newImage(s storageReference) (types.Image, error) {
+	src, err := newImageSource(s)
+	if err != nil {
+		return nil, err
+ } + img, err := image.FromSource(src) + if err != nil { + return nil, err + } + size, err := src.getSize() + if err != nil { + return nil, err + } + return &storageImage{Image: img, size: size}, nil +} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go new file mode 100644 index 00000000..13413df1 --- /dev/null +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -0,0 +1,127 @@ +package storage + +import ( + "strings" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" +) + +// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte +// value hex-encoded into a 64-character string, and a reference to a Store +// where an image is, or would be, kept. +type storageReference struct { + transport storageTransport + reference string + id string + name reference.Named +} + +func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { + // We take a copy of the transport, which contains a pointer to the + // store that it used for resolving this reference, so that the + // transport that we'll return from Transport() won't be affected by + // further calls to the original transport's SetStore() method. + return &storageReference{ + transport: transport, + reference: reference, + id: id, + name: name, + } +} + +// Resolve the reference's name to an image ID in the store, if there's already +// one present with the same name or ID. +func (s *storageReference) resolveID() string { + if s.id == "" { + image, err := s.transport.store.GetImage(s.reference) + if image != nil && err == nil { + s.id = image.ID + } + } + return s.id +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + } +} + +// Return a name with a tag, if we have a name to base them on. +func (s storageReference) DockerReference() reference.Named { + return s.name +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. +func (s storageReference) StringWithinTransport() string { + storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]" + if s.name == nil { + return storeSpec + "@" + s.id + } + if s.id == "" { + return storeSpec + s.reference + } + return storeSpec + s.reference + "@" + s.id +} + +func (s storageReference) PolicyConfigurationIdentity() string { + return s.StringWithinTransport() +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. +func (s storageReference) PolicyConfigurationNamespaces() []string { + storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]" + driverlessStoreSpec := "[" + s.transport.store.GetGraphRoot() + "]" + namespaces := []string{} + if s.name != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. 
+			namespaces = append(namespaces, storeSpec+s.reference)
+		}
+		components := strings.Split(s.name.FullName(), "/")
+		for len(components) > 0 {
+			namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
+			components = components[:len(components)-1]
+		}
+	}
+	namespaces = append(namespaces, storeSpec)
+	namespaces = append(namespaces, driverlessStoreSpec)
+	return namespaces
+}
+
+func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+	return newImage(s)
+}
+
+func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
+	id := s.resolveID()
+	if id == "" {
+		logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
+		return ErrNoSuchImage
+	}
+	layers, err := s.transport.store.DeleteImage(id, true)
+	if err == nil {
+		logrus.Debugf("deleted image %q", id)
+		for _, layer := range layers {
+			logrus.Debugf("deleted layer %q", layer)
+		}
+	}
+	return err
+}
+
+func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+	return newImageSource(s)
+}
+
+func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(s)
+}
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
new file mode 100644
index 00000000..71056ff1
--- /dev/null
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -0,0 +1,285 @@
+package storage
+
+import (
+	"errors"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/storage"
+	"github.com/docker/distribution/digest"
+	ddigest "github.com/docker/distribution/digest"
+)
+
+var (
+	// Transport is an ImageTransport that uses either a default
+	// storage.Store or one that it's explicitly told to use.
+	Transport StoreTransport = &storageTransport{}
+	// ErrInvalidReference is returned when ParseReference() is passed an
+	// empty reference.
+	ErrInvalidReference = errors.New("invalid reference")
+	// ErrPathNotAbsolute is returned when a graph root is not an absolute
+	// path name.
+	ErrPathNotAbsolute = errors.New("path name is not absolute")
+	idRegexp           = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$")
+)
+
+// StoreTransport is an ImageTransport that uses a storage.Store to parse
+// references, either its own default or one that it's told to use.
+type StoreTransport interface {
+	types.ImageTransport
+	// SetStore sets the default store for this transport.
+	SetStore(storage.Store)
+	// GetImage retrieves the image from the transport's store that's named
+	// by the reference.
+	GetImage(types.ImageReference) (*storage.Image, error)
+	// GetStoreImage retrieves the image from a specified store that's named
+	// by the reference.
+	GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
+	// ParseStoreReference parses a reference, overriding any store
+	// specification that it may contain.
+	ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
+}
+
+type storageTransport struct {
+	store storage.Store
+}
+
+func (s *storageTransport) Name() string {
+	// Still haven't really settled on a name.
+ return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + var name reference.Named + var sum digest.Digest + var err error + if ref == "" { + return nil, ErrInvalidReference + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + ref = ref[closeIndex+1:] + } + refInfo := strings.SplitN(ref, "@", 2) + if len(refInfo) == 1 { + // A name. + name, err = reference.ParseNamed(refInfo[0]) + if err != nil { + return nil, err + } + } else if len(refInfo) == 2 { + // An ID, possibly preceded by a name. + if refInfo[0] != "" { + name, err = reference.ParseNamed(refInfo[0]) + if err != nil { + return nil, err + } + } + sum, err = digest.ParseDigest("sha256:" + refInfo[1]) + if err != nil { + return nil, err + } + } else { // Coverage: len(refInfo) is always 1 or 2 + // Anything else: store specified in a form we don't + // recognize. + return nil, ErrInvalidReference + } + storeSpec := "[" + store.GetGraphDriverName() + "@" + store.GetGraphRoot() + "]" + id := "" + if sum.Validate() == nil { + id = sum.Hex() + } + refname := "" + if name != nil { + name = reference.WithDefaultTag(name) + refname = verboseName(name) + } + if refname == "" { + logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) + } else if id == "" { + logrus.Debugf("parsed reference into %q", storeSpec+refname) + } else { + logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) + } + return newReference(storageTransport{store: store}, refname, id, name), nil +} + +func (s *storageTransport) GetStore() (storage.Store, error) { + // Return the transport's previously-set store. If we don't have one + // of those, initialize one now. + if s.store == nil { + store, err := storage.GetStore(storage.DefaultStoreOptions) + if err != nil { + return nil, err + } + s.store = store + } + return s.store, nil +} + +// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// possibly prefixed with a store specifier in the form "[_graphroot_]" or +// "[_driver_@_graphroot_]", tries to figure out which it is, and returns it in +// a reference object. If the _graphroot_ is a location other than the default, +// it needs to have been previously opened using storage.GetStore(), so that it +// can figure out which run root goes with the graph root. +func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + // Check if there's a store location prefix. If there is, then it + // needs to match a store that was previously initialized using + // storage.GetStore(), or be enough to let the storage library fill out + // the rest using knowledge that it has from elsewhere. 
+ if reference[0] == '[' { + closeIndex := strings.IndexRune(reference, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + storeSpec := reference[1:closeIndex] + reference = reference[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphRoot: storeInfo[0], + }) + if err != nil { + return nil, err + } + store = store2 + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphDriverName: storeInfo[0], + GraphRoot: storeInfo[1], + }) + if err != nil { + return nil, err + } + store = store2 + } else { + // Anything else: store specified in a form we don't + // recognize. + return nil, ErrInvalidReference + } + } + return s.ParseStoreReference(store, reference) +} + +func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { + dref := ref.DockerReference() + if dref == nil { + if sref, ok := ref.(*storageReference); ok { + if sref.id != "" { + if img, err := store.GetImage(sref.id); err == nil { + return img, nil + } + } + } + return nil, ErrInvalidReference + } + return store.GetImage(verboseName(dref)) +} + +func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + return s.GetStoreImage(store, ref) +} + +func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { + // Check that there's a store location prefix. Values we're passed are + // expected to come from PolicyConfigurationIdentity or + // PolicyConfigurationNamespaces, so if there's no store location, + // something's wrong. + if scope[0] != '[' { + return ErrInvalidReference + } + // Parse the store location prefix. + closeIndex := strings.IndexRune(scope, ']') + if closeIndex < 1 { + return ErrInvalidReference + } + storeSpec := scope[1:closeIndex] + scope = scope[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return ErrPathNotAbsolute + } + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return ErrPathNotAbsolute + } + } else { + // Anything else: store specified in a form we don't + // recognize. + return ErrInvalidReference + } + // That might be all of it, and that's okay. + if scope == "" { + return nil + } + // But if there is anything left, it has to be a name, with or without + // a tag, with or without an ID, since we don't return namespace values + // that are just bare IDs. 
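+	// For example (illustrative), "[overlay@/var/lib/storage]docker.io/library/busybox:latest"
+	// and the same scope with "@<64-hex-id>" appended are both valid here,
+	// while a bare "[overlay@/var/lib/storage]" was already accepted above.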
+ scopeInfo := strings.SplitN(scope, "@", 2) + if len(scopeInfo) == 1 && scopeInfo[0] != "" { + _, err := reference.ParseNamed(scopeInfo[0]) + if err != nil { + return err + } + } else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" { + _, err := reference.ParseNamed(scopeInfo[0]) + if err != nil { + return err + } + _, err = ddigest.ParseDigest("sha256:" + scopeInfo[1]) + if err != nil { + return err + } + } else { + return ErrInvalidReference + } + return nil +} + +func verboseName(name reference.Named) string { + name = reference.WithDefaultTag(name) + tag := "" + if tagged, ok := name.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + return name.FullName() + ":" + tag +} diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go index 97613517..d4141df0 100644 --- a/vendor/github.com/containers/image/transports/transports.go +++ b/vendor/github.com/containers/image/transports/transports.go @@ -9,6 +9,7 @@ import ( "github.com/containers/image/docker/daemon" ociLayout "github.com/containers/image/oci/layout" "github.com/containers/image/openshift" + "github.com/containers/image/storage" "github.com/containers/image/types" ) @@ -25,6 +26,7 @@ func init() { daemon.Transport, ociLayout.Transport, openshift.Transport, + storage.Transport, } { name := t.Name() if _, ok := KnownTransports[name]; ok { diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 9fd1de5e..a05a7064 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -1,6 +1,7 @@ package types import ( + "errors" "io" "time" @@ -127,6 +128,7 @@ type ImageSource interface { // // There is a specific required order for some of the calls: // PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) +// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest // PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) // Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. // @@ -158,6 +160,10 @@ type ImageDestination interface { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error) + // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. A false result will often be accompanied by an ErrBlobNotFound error. + HasBlob(info BlobInfo) (bool, int64, error) + // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree. + ReapplyBlob(info BlobInfo) (BlobInfo, error) // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. 
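+	//
+	// Putting the three blob methods together, a copier might do the
+	// following (a sketch only; the ordering rules above are the contract):
+	//
+	//	if ok, size, err := dest.HasBlob(info); err == nil && ok {
+	//		info.Size = size
+	//		uploaded, err = dest.ReapplyBlob(info) // skip the upload
+	//	} else {
+	//		uploaded, err = dest.PutBlob(stream, info)
+	//	}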
PutManifest([]byte) error PutSignatures(signatures [][]byte) error @@ -212,6 +218,9 @@ type Image interface { UpdatedImage(options ManifestUpdateOptions) (Image, error) // IsMultiImage returns true if the image's manifest is a list of images, false otherwise. IsMultiImage() bool + // Size returns an approximation of the amount of disk space which is consumed by the image in its current + // location. If the size is not known, -1 will be returned. + Size() (int64, error) } // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest @@ -284,3 +293,8 @@ type SystemContext struct { // in order to not break any existing docker's integration tests. DockerDisableV1Ping bool } + +var ( + // ErrBlobNotFound can be returned by an ImageDestination's HasBlob() method + ErrBlobNotFound = errors.New("no such blob present") +) diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE new file mode 100644 index 00000000..8f3fee62 --- /dev/null +++ b/vendor/github.com/containers/storage/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go new file mode 100644 index 00000000..f9b58bf5 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -0,0 +1,586 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + mountpk "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/stringid" + + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. 
+	ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems")
+	// ErrAufsNested means aufs cannot be used because we are in a user namespace
+	ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace")
+	backingFs     = ""
+
+	enableDirpermLock sync.Once
+	enableDirperm     bool
+)
+
+func init() {
+	graphdriver.Register("aufs", Init)
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	sync.Mutex
+	root          string
+	uidMaps       []idtools.IDMap
+	gidMaps       []idtools.IDMap
+	ctr           *graphdriver.RefCounter
+	pathCacheLock sync.Mutex
+	pathCache     map[string]string
+}
+
+// Init returns a new AUFS driver.
+// An error is returned if AUFS is not supported.
+func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	// Try to load the aufs kernel module
+	if err := supportsAufs(); err != nil {
+		return nil, graphdriver.ErrNotSupported
+	}
+
+	fsMagic, err := graphdriver.GetFSMagic(root)
+	if err != nil {
+		return nil, err
+	}
+	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
+		backingFs = fsName
+	}
+
+	switch fsMagic {
+	case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs:
+		logrus.Errorf("AUFS is not supported over %s", backingFs)
+		return nil, graphdriver.ErrIncompatibleFS
+	}
+
+	paths := []string{
+		"mnt",
+		"diff",
+		"layers",
+	}
+
+	a := &Driver{
+		root:      root,
+		uidMaps:   uidMaps,
+		gidMaps:   gidMaps,
+		pathCache: make(map[string]string),
+		ctr:       graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	// Create the root aufs driver dir, and return early
+	// if it already exists. If not, populate the dir structure below.
+	if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil {
+		if os.IsExist(err) {
+			return a, nil
+		}
+		return nil, err
+	}
+
+	if err := mountpk.MakePrivate(root); err != nil {
+		return nil, err
+	}
+
+	// Populate the dir structure
+	for _, p := range paths {
+		if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil {
+			return nil, err
+		}
+	}
+	return a, nil
+}
+
+// Return a nil error if the kernel supports aufs.
+// We cannot rely on modprobe, because inside dind modprobe fails
+// to run.
+func supportsAufs() error {
+	// We can try to modprobe aufs first before looking at
+	// proc/filesystems for when aufs is supported
+	exec.Command("modprobe", "aufs").Run()
+
+	if rsystem.RunningInUserNS() {
+		return ErrAufsNested
+	}
+
+	f, err := os.Open("/proc/filesystems")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if strings.Contains(s.Text(), "aufs") {
+			return nil
+		}
+	}
+	return ErrAufsNotSupported
+}
+
+func (a *Driver) rootPath() string {
+	return a.root
+}
+
+func (*Driver) String() string {
+	return "aufs"
+}
+
+// Status returns current information about the filesystem such as root directory, number of directories mounted, etc.
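+// For example, a driver rooted at /var/lib/storage/aufs on ext4 might
+// report (values illustrative):
+//
+//	Root Dir: /var/lib/storage/aufs
+//	Backing Filesystem: extfs
+//	Dirs: 12
+//	Dirperm1 Supported: true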
+func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return a.Create(id, parent, mountLabel, storageOpt) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) + if err != nil { + return err + } + // Directory permission is 0755. + // The path of directories are /mnt/ + // and /diff/ + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + return err + } + } + return nil +} + +// Remove will unmount and remove the given id. +func (a *Driver) Remove(id string) error { + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + if err := a.unmount(mountpoint); err != nil { + // no need to return here, we can still try to remove since the `Rename` will fail below if still mounted + logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err) + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. 
+	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	defer os.RemoveAll(tmpMntPath)
+
+	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
+	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	defer os.RemoveAll(tmpDiffpath)
+
+	// Remove the layers file for the id
+	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	a.pathCacheLock.Lock()
+	delete(a.pathCache, id)
+	a.pathCacheLock.Unlock()
+	return nil
+}
+
+// Get returns the rootfs path for the id.
+// This will mount the dir at its given path.
+func (a *Driver) Get(id, mountLabel string) (string, error) {
+	parents, err := a.getParentLayerPaths(id)
+	if err != nil && !os.IsNotExist(err) {
+		return "", err
+	}
+
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	a.pathCacheLock.Unlock()
+
+	if !exists {
+		m = a.getDiffPath(id)
+		if len(parents) > 0 {
+			m = a.getMountpoint(id)
+		}
+	}
+	if count := a.ctr.Increment(m); count > 1 {
+		return m, nil
+	}
+
+	// If a dir does not have a parent (no layers), do not try to mount;
+	// just return the diff path to the data.
+	if len(parents) > 0 {
+		if err := a.mount(id, m, mountLabel, parents); err != nil {
+			return "", err
+		}
+	}
+
+	a.pathCacheLock.Lock()
+	a.pathCache[id] = m
+	a.pathCacheLock.Unlock()
+	return m, nil
+}
+
+// Put unmounts and updates the list of active mounts.
+func (a *Driver) Put(id string) error {
+	a.pathCacheLock.Lock()
+	m, exists := a.pathCache[id]
+	if !exists {
+		m = a.getMountpoint(id)
+		a.pathCache[id] = m
+	}
+	a.pathCacheLock.Unlock()
+	if count := a.ctr.Decrement(m); count > 0 {
+		return nil
+	}
+
+	err := a.unmount(m)
+	if err != nil {
+		logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
+	}
+	return err
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer, which may be "".
+func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
+	// AUFS doesn't need the parent layer to produce a diff.
+	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+		Compression:     archive.Uncompressed,
+		ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
+		UIDMaps:         a.uidMaps,
+		GIDMaps:         a.gidMaps,
+	})
+}
+
+type fileGetNilCloser struct {
+	storage.FileGetter
+}
+
+func (f fileGetNilCloser) Close() error {
+	return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	p := path.Join(a.rootPath(), "diff", id)
+	return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+}
+
+func (a *Driver) applyDiff(id string, diff archive.Reader) error {
+	return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+		UIDMaps: a.uidMaps,
+		GIDMaps: a.gidMaps,
+	})
+}
+
+// DiffSize calculates the changes between the specified id
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+	// AUFS doesn't need the parent layer to calculate the diff size.
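+	// (directory.Size below walks only this layer's diff directory, so the
+	// result is the layer's own on-disk footprint, not the merged view.)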
+ return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + // AUFS doesn't need the parent id to apply the diff. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. 
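+	//
+	// Illustratively, the first mount's data string has the shape
+	//
+	//	br:<root>/diff/<id>=rw:<root>/diff/<parent1>=ro+wh:...,dio,xino=/dev/shm/aufs.xino
+	//
+	// and any read-only branches that did not fit are added one at a time
+	// via MS_REMOUNT with data of the form "append:<root>/diff/<parentN>=ro+wh".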
+ + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + firstMount := true + i := 0 + + for { + for ; i < len(ro); i++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[i]) + + if firstMount { + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } else { + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } + } + + if firstMount { + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + firstMount = false + } + + if i == len(ro) { + break + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. +func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go new file mode 100644 index 00000000..eb298d9e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. 
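+//
+// For example, the layers file for a layer with a parent and a grandparent
+// holds their ids, nearest ancestor first, one per line:
+//
+//	<parent-id>
+//	<grandparent-id>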
+func getParentIds(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go new file mode 100644 index 00000000..da1e892f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go new file mode 100644 index 00000000..8062bae4 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "syscall" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go new file mode 100644 index 00000000..d030b066 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. 
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
new file mode 100644
index 00000000..a85c3c77
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -0,0 +1,520 @@
+// +build linux
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+	snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+var (
+	quotaEnabled  = false
+	userDiskQuota = false
+)
+
+type btrfsOptions struct {
+	minSpace uint64
+	size     uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+
+	if fsMagic != graphdriver.FsMagicBtrfs {
+		return nil, graphdriver.ErrPrerequisites
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	opt, err := parseOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	if userDiskQuota {
+		if err := subvolEnableQuota(home); err != nil {
+			return nil, err
+		}
+		quotaEnabled = true
+	}
+
+	driver := &Driver{
+		home:    home,
+		uidMaps: uidMaps,
+		gidMaps: gidMaps,
+		options: opt,
+	}
+
+	return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, error) {
+	var options btrfsOptions
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "btrfs.min_space":
+			minSpace, err := units.RAMInBytes(val)
+			if err != nil {
+				return options, err
+			}
+			userDiskQuota = true
+			options.minSpace = uint64(minSpace)
+		default:
+			return options, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, nil
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	// root of the file system
+	home    string
+	uidMaps []idtools.IDMap
+	gidMaps []idtools.IDMap
+	options btrfsOptions
+}
+
+// String prints the name of the driver (btrfs).
+func (d *Driver) String() string {
+	return "btrfs"
+}
+
+// Status returns current driver information in a two dimensional string array.
+// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
+// Version information can be used to check compatibility with your kernel.
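+// For example (values illustrative, depending on the btrfs-progs present at
+// build time):
+//
+//	Build Version: Btrfs v3.17
+//	Library Version: 101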
+func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if quotaEnabled { + if err := subvolDisableQuota(d.home); err != nil { + return err + } + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, 
err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func subvolEnableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolDisableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolRescanQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create the filesystem with given id. 
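+// A caller may cap the new subvolume's size through the storage options,
+// e.g. (illustrative) storageOpt = map[string]string{"size": "20G"}; the
+// value is parsed with units.RAMInBytes in parseStorageOpt below.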
+func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if !quotaEnabled { + if err := subvolEnableQuota(d.home); err != nil { + return err + } + quotaEnabled = true + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + if err := subvolRescanQuota(d.home); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. 
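+// For example (illustrative), Exists reports false for an id that was never
+// Create()d, swallowing the underlying stat error.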
+func (d *Driver) Exists(id string) bool {
+	dir := d.subvolumesDirID(id)
+	_, err := os.Stat(dir)
+	return err == nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
new file mode 100644
index 00000000..f0708888
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
new file mode 100644
index 00000000..73d90cdd
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -0,0 +1,26 @@
+// +build linux,!btrfs_noversion
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// around version 3.16, they did not define lib version yet
+#ifndef BTRFS_LIB_VERSION
+#define BTRFS_LIB_VERSION -1
+#endif
+
+// upstream had removed it, but now it will be coming back
+#ifndef BTRFS_BUILD_VERSION
+#define BTRFS_BUILD_VERSION "-"
+#endif
+*/
+import "C"
+
+func btrfsBuildVersion() string {
+	return string(C.BTRFS_BUILD_VERSION)
+}
+
+func btrfsLibVersion() int {
+	return int(C.BTRFS_LIB_VERSION)
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
new file mode 100644
index 00000000..f802fbc6
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -0,0 +1,14 @@
+// +build linux,btrfs_noversion
+
+package btrfs
+
+// TODO(vbatts) remove this work-around once supported linux distros are on
+// btrfs utilities of >= 3.16.1
+
+func btrfsBuildVersion() string {
+	return "-"
+}
+
+func btrfsLibVersion() int {
+	return -1
+}
diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go
new file mode 100644
index 00000000..5ea604f5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/counter.go
@@ -0,0 +1,67 @@
+package graphdriver
+
+import "sync"
+
+type minfo struct {
+	check bool
+	count int
+}
+
+// RefCounter is a generic counter for use by graphdriver Get/Put calls
+type RefCounter struct {
+	counts  map[string]*minfo
+	mu      sync.Mutex
+	checker Checker
+}
+
+// NewRefCounter returns a new RefCounter
+func NewRefCounter(c Checker) *RefCounter {
+	return &RefCounter{
+		checker: c,
+		counts:  make(map[string]*minfo),
+	}
+}
+
+// Increment increases the ref count for the given path and returns the current count
+func (c *RefCounter) Increment(path string) int {
+	c.mu.Lock()
+	m := c.counts[path]
+	if m == nil {
+		m = &minfo{}
+		c.counts[path] = m
+	}
+	// If we are checking this path for the first time, ask the checker
+	// whether it is already mounted on the system, so that the ref count
+	// starts out correct for a mount that is already in use.
+	if !m.check {
+		m.check = true
+		if c.checker.IsMounted(path) {
+			m.count++
+		}
+	}
+	m.count++
+	c.mu.Unlock()
+	return m.count
+}
+
+// Decrement decreases the ref count for the given path and returns the current count
+func (c *RefCounter) Decrement(path string) int {
+	c.mu.Lock()
+	m := c.counts[path]
+	if m == nil {
+		m = &minfo{}
+		c.counts[path] = m
+	}
+	// If we are checking this path for the first time, ask the checker
+	// whether it is already mounted on the system, so that the ref count
+	// starts out correct for a mount that is already in use.
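+	//
+	// Callers pair this with Increment; e.g. (sketch) a driver's Get()
+	// mounts only when Increment(path) returns 1, and its Put() unmounts
+	// only when Decrement(path) returns 0.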
+	if !m.check {
+		m.check = true
+		if c.checker.IsMounted(path) {
+			m.count++
+		}
+	}
+	m.count--
+	c.mu.Unlock()
+	return m.count
+}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
new file mode 100644
index 00000000..0accf31f
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -0,0 +1,2661 @@
+// +build linux
+
+package devmapper
+
+import (
+	"bufio"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+
+	"github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/devicemapper"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/loopback"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/storageversion"
+	"github.com/docker/go-units"
+
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+var (
+	defaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
+	defaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
+	defaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
+	defaultThinpBlockSize       uint32 = 128 // 64K = 128 512b sectors
+	defaultUdevSyncOverride            = false
+	maxDeviceID                        = 0xffffff // 24 bit, pool limit
+	deviceIDMapSz                      = (maxDeviceID + 1) / 8
+	// We retry device removal so many times that even error messages
+	// would fill up the console during normal operation, so we only
+	// log Fatal messages by default.
+	logLevel                            = devicemapper.LogLevelFatal
+	driverDeferredRemovalSupport        = false
+	enableDeferredRemoval               = false
+	enableDeferredDeletion              = false
+	userBaseSize                        = false
+	defaultMinFreeSpacePercent   uint32 = 10
+)
+
+const deviceSetMetaFile string = "deviceset-metadata"
+const transactionMetaFile string = "transaction-metadata"
+
+type transaction struct {
+	OpenTransactionID uint64 `json:"open_transaction_id"`
+	DeviceIDHash      string `json:"device_hash"`
+	DeviceID          int    `json:"device_id"`
+}
+
+type devInfo struct {
+	Hash          string `json:"-"`
+	DeviceID      int    `json:"device_id"`
+	Size          uint64 `json:"size"`
+	TransactionID uint64 `json:"transaction_id"`
+	Initialized   bool   `json:"initialized"`
+	Deleted       bool   `json:"deleted"`
+	devices       *DeviceSet
+
+	// The global DeviceSet lock guarantees that we serialize all
+	// the calls to libdevmapper (which is not threadsafe), but we
+	// sometimes release that lock while sleeping. In that case
+	// this per-device lock is still held, protecting against
+	// other accesses to the device that we're doing the wait on.
+	//
+	// WARNING: In order to avoid AB-BA deadlocks when releasing
+	// the global lock while holding the per-device locks, all
+	// per-device locks must be acquired *before* the global
+	// DeviceSet lock, and multiple device locks should be acquired
+	// parent before child.
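+	//
+	// Sketch of the intended ordering (our reading of the warning above):
+	//
+	//	info.lock.Lock()   // per-device lock first
+	//	devices.Lock()     // then the global DeviceSet lock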
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in docker inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. + Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. 
+	TransactionID uint64
+	// SizeInSectors indicates the size of the sectors allocated.
+	SizeInSectors uint64
+	// MappedSectors indicates the number of mapped sectors.
+	MappedSectors uint64
+	// HighestMappedSector is the highest mapped sector.
+	HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+	return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+	hash := info.Hash
+	if hash == "" {
+		hash = "base"
+	}
+	return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+	return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+	return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+	return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+	file := info.Hash
+	if file == "" {
+		file = "base"
+	}
+	return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+	return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+	return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+	return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+	return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+	dirname := devices.loopbackDir()
+	filename := path.Join(dirname, name)
+
+	_, err := os.Stat(filename)
+	return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and the new size is larger than its current size, it grows to the new size.
+// Either way it returns the full path.
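+// Shrinking is never attempted; an existing file larger than <size> is left as-is with a warning.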
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + if (devices.deviceIDMap[deviceID/8] & mask) != 0 { + return false + } + return true +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debugf("devmapper: constructDeviceIDMap()") + defer logrus.Debugf("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debugf("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
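+// registerDevice records the new device in the in-memory table and persists its metadata to disk; if the save fails, the table entry is removed again.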
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemoval(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. 
Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. 
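+	// Otherwise, each tick of the ticker below retries deletion of any devices still marked as deleted.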
+ if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + if err := devices.poolHasFreeSpace(); err != nil { + return err + } + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
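+	// Note that saveBaseDeviceUUID briefly activates the base device in order to read its UUID.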
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. + // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: 
%s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
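+// The major/minor numbers returned here are compared against the pool table to determine whether the pool is backed by loopback files.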
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
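+	// Deferred deletion additionally requires deferred removal, which is checked below.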
+	if enableDeferredRemoval {
+		if !driverDeferredRemovalSupport {
+			return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
+		}
+		if !devicemapper.LibraryDeferredRemovalSupport {
+			return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
+		}
+		logrus.Debug("devmapper: Deferred removal support enabled.")
+		devices.deferredRemove = true
+	}
+
+	if enableDeferredDeletion {
+		if !devices.deferredRemove {
+			return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
+		}
+		logrus.Debug("devmapper: Deferred deletion support enabled.")
+		devices.deferredDelete = true
+	}
+	return nil
+}
+
+func (devices *DeviceSet) initDevmapper(doInit bool) error {
+	// give ourselves to libdm as a log handler
+	devicemapper.LogInit(devices)
+
+	version, err := devicemapper.GetDriverVersion()
+	if err != nil {
+		// Can't even get driver version, assume not supported
+		return graphdriver.ErrNotSupported
+	}
+
+	if err := determineDriverCapabilities(version); err != nil {
+		return graphdriver.ErrNotSupported
+	}
+
+	if err := devices.enableDeferredRemovalDeletion(); err != nil {
+		return err
+	}
+
+	// https://github.com/docker/docker/issues/4036
+	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
+		if storageversion.IAmStatic == "true" {
+			logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
+		} else {
+			logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
+		}
+
+		if !devices.overrideUdevSyncCheck {
+			return graphdriver.ErrNotSupported
+		}
+	}
+
+	// Create the root dir of the devmapper driver with ownership matching
+	// this daemon's remapped root uid/gid so containers can start properly
+	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
+	if err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
+		return err
+	}
+	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	// Set the device prefix from the device id and inode of the docker root dir
+
+	st, err := os.Stat(devices.root)
+	if err != nil {
+		return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
+	}
+	sysSt := st.Sys().(*syscall.Stat_t)
+	// "reg-" stands for "regular file".
+	// In the future we might use "dev-" for "device file", etc.
+	// docker-maj,min[-inode] stands for:
+	//	- Managed by docker
+	//	- The target of this device is at major <maj> and minor <min>
+	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
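+	// Using the root directory's device and inode makes the prefix unique per storage root and stable across daemon restarts.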
+	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
+	logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
+
+	// Check for the existence of the thin-pool device
+	poolExists, err := devices.thinPoolExists(devices.getPoolName())
+	if err != nil {
+		return err
+	}
+
+	// It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
+	// that are not close-on-exec,
+	// so we add this bad hack to make sure it closes itself
+	setCloseOnExec("/dev/mapper/control")
+
+	// Make sure the sparse images exist in <root>/devicemapper/data and
+	// <root>/devicemapper/metadata
+
+	createdLoopback := false
+
+	// If the pool doesn't exist, create it
+	if !poolExists && devices.thinPoolDevice == "" {
+		logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
+
+		var (
+			dataFile     *os.File
+			metadataFile *os.File
+		)
+
+		if devices.dataDevice == "" {
+			// Make sure the sparse images exist in <root>/devicemapper/data
+
+			hasData := devices.hasImage("data")
+
+			if !doInit && !hasData {
+				return errors.New("Loopback data file not found")
+			}
+
+			if !hasData {
+				createdLoopback = true
+			}
+
+			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
+			if err != nil {
+				logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
+				return err
+			}
+
+			dataFile, err = loopback.AttachLoopDevice(data)
+			if err != nil {
+				return err
+			}
+			devices.dataLoopFile = data
+			devices.dataDevice = dataFile.Name()
+		} else {
+			dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
+			if err != nil {
+				return err
+			}
+		}
+		defer dataFile.Close()
+
+		if devices.metadataDevice == "" {
+			// Make sure the sparse images exist in <root>/devicemapper/metadata
+
+			hasMetadata := devices.hasImage("metadata")
+
+			if !doInit && !hasMetadata {
+				return errors.New("Loopback metadata file not found")
+			}
+
+			if !hasMetadata {
+				createdLoopback = true
+			}
+
+			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
+			if err != nil {
+				logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
+				return err
+			}
+
+			metadataFile, err = loopback.AttachLoopDevice(metadata)
+			if err != nil {
+				return err
+			}
+			devices.metadataLoopFile = metadata
+			devices.metadataDevice = metadataFile.Name()
+		} else {
+			metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
+			if err != nil {
+				return err
+			}
+		}
+		defer metadataFile.Close()
+
+		if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
+			return err
+		}
+	}
+
+	// Pool already exists and the caller did not pass us a pool. That means
+	// we probably created the pool earlier and could not remove it as some
+	// containers were still using it. Detect some of the properties of the
+	// pool, like whether it is using loop devices.
+	if poolExists && devices.thinPoolDevice == "" {
+		if err := devices.loadThinPoolLoopBackInfo(); err != nil {
+			logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err)
+			return err
+		}
+	}
+
+	// If we didn't just create the data or metadata image, we need to
+	// load the transaction id and migrate old metadata
+	if !createdLoopback {
+		if err := devices.initMetaData(); err != nil {
+			return err
+		}
+	}
+
+	if devices.thinPoolDevice == "" {
+		if devices.metadataLoopFile != "" || devices.dataLoopFile != "" {
+			logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. 
Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. + if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) + return err + } + + defer devices.closeTransaction() + + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // If syncDelete is true, we want to return error. 
If deferred
+		// deletion is not enabled, we return an error. If the error is
+		// something other than EBUSY, return an error.
+		if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy {
+			logrus.Debugf("devmapper: Error deleting device: %s", err)
+			return err
+		}
+	}
+
+	if err == nil {
+		if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil {
+			return err
+		}
+		// If the device was already in deferred delete state that means
+		// deletion was being tried again later. Reduce the deleted
+		// device count.
+		if info.Deleted {
+			devices.nrDeletedDevices--
+		}
+		devices.markDeviceIDFree(info.DeviceID)
+	} else {
+		if err := devices.markForDeferredDeletion(info); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+	logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash)
+	defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash)
+	// This is a workaround for the kernel not discarding blocks
+	// on the thin pool when we remove a thin-provisioned device,
+	// so we do it manually.
+	// Even if the device is deferred deleted, activate it and issue
+	// discards.
+	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+		return err
+	}
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.OpenCount != 0 {
+		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+		return nil
+	}
+
+	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+	}
+	return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+	if devices.doBlkDiscard {
+		devices.issueDiscard(info)
+	}
+
+	// Try to deactivate device in case it is active.
+	if err := devices.deactivateDevice(info); err != nil {
+		logrus.Debugf("devmapper: Error deactivating device: %s", err)
+		return err
+	}
+
+	if err := devices.deleteTransaction(info, syncDelete); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DeleteDevice will return success if the device has been marked for deferred
+// removal. If one wants to override that and wants DeleteDevice() to fail if
+// the device was busy and could not be deleted, set syncDelete=true.
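+// Lock ordering: the per-device lock is acquired before the global DeviceSet lock, matching the convention documented on devInfo.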
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
+	logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete)
+	defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete)
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return err
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	return devices.deleteDevice(info, syncDelete)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+	logrus.Debug("devmapper: deactivatePool()")
+	defer logrus.Debug("devmapper: deactivatePool END")
+	devname := devices.getPoolDevName()
+
+	devinfo, err := devicemapper.GetInfo(devname)
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+	if err := devicemapper.RemoveDevice(devname); err != nil {
+		return err
+	}
+
+	if d, err := devicemapper.GetDeps(devname); err == nil {
+		logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
+	}
+
+	return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
+	logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash)
+	defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+
+	if devices.deferredRemove {
+		if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
+			return err
+		}
+	} else {
+		if err := devices.removeDevice(info.Name()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+	var err error
+
+	logrus.Debugf("devmapper: removeDevice START(%s)", devname)
+	defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
+
+	for i := 0; i < 200; i++ {
+		err = devicemapper.RemoveDevice(devname)
+		if err == nil {
+			break
+		}
+		if err != devicemapper.ErrBusy {
+			return err
+		}
+
+		// If we see EBUSY it may be a transient error,
+		// so sleep a bit and retry a few times.
+		devices.Unlock()
+		time.Sleep(100 * time.Millisecond)
+		devices.Lock()
+	}
+
+	return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
+	if !devices.deferredRemove {
+		return nil
+	}
+
+	logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+	defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
+
+	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+
+	if devinfo != nil && devinfo.DeferredRemove == 0 {
+		return nil
+	}
+
+	// Cancel deferred remove
+	for i := 0; i < 100; i++ {
+		err = devicemapper.CancelDeferredRemove(info.Name())
+		if err == nil {
+			break
+		}
+
+		if err == devicemapper.ErrEnxio {
+			// Device is probably already gone. Return success.
+			return nil
+		}
+
+		if err != devicemapper.ErrBusy {
+			return err
+		}
+
+		// If we see EBUSY it may be a transient error,
+		// so sleep a bit and retry a few times.
+		devices.Unlock()
+		time.Sleep(100 * time.Millisecond)
+		devices.Lock()
+	}
+	return err
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+	// Stop the deletion worker. This should stop delivering new events to
This should stop delivering new events to the + // ticker channel. That means no new instance of cleanupDeletedDevice() + // will run after this call. If one instance is already running at + // the time of the call, it must be holding devices.Lock() and + // we will block on this lock till the cleanup function exits. + devices.deletionWorkerTicker.Stop() + + devices.Lock() + // Save DeviceSet Metadata first. Docker kills all threads if they + // don't finish in a certain time. It is possible that Shutdown() + // routine does not finish in time as we loop trying to deactivate + // some devices while these are busy. In that case shutdown() routine + // will be killed and we will not get a chance to save deviceset + // metadata. Hence save this early before trying to deactivate devices. + devices.saveDeviceSetMetaData() + + // ignore the error since it's just a best effort to not try to unmount something that's mounted + mounts, _ := mount.GetMounts() + mounted := make(map[string]bool, len(mounts)) + for _, mnt := range mounts { + mounted[mnt.Mountpoint] = true + } + + if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if p == path.Join(home, "mnt") { + return nil + } + if !info.IsDir() { + return nil + } + + if mounted[p] { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil { + logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) + } + } + + if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil { + logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err) + } else { + if err := devices.deactivateDevice(devInfo); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err) + } + } + + return nil + }); err != nil && !os.IsNotExist(err) { + devices.Unlock() + return err + } + + devices.Unlock() + + info, _ := devices.lookupDeviceWithLock("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if devices.thinPoolDevice == "" { + if err := devices.deactivatePool(); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + } + } + devices.Unlock() + + return nil +} + +// MountDevice mounts the device if not already mounted.
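Before the implementation below, a hedged usage sketch (withMountedDevice is a hypothetical helper, not part of the vendored code): every successful MountDevice is paired with an UnmountDevice against the same path.

    package example

    import "github.com/containers/storage/drivers/devmapper"

    // withMountedDevice mounts a layer, runs fn against the mountpoint, and
    // unmounts again; an empty mountLabel simply adds no SELinux label option.
    func withMountedDevice(devices *devmapper.DeviceSet, hash, target string, fn func(string) error) (err error) {
        if err = devices.MountDevice(hash, target, ""); err != nil {
            return err
        }
        defer func() {
            if uerr := devices.UnmountDevice(hash, target); err == nil {
                err = uerr
            }
        }()
        return fn(target)
    }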
+func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount two filesystems with the same UUID + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + return nil +} + +// UnmountDevice unmounts the device and removes it from hash. +func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + return err + } + logrus.Debug("devmapper: Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + return nil +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids.
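For illustration, a sketch combining List with the GetDeviceStatus accessor defined below (printUsage is a hypothetical helper; the field names are those of DevStatus in this diff):

    package example

    import (
        "fmt"

        "github.com/containers/storage/drivers/devmapper"
    )

    // printUsage walks every known device id and prints how much of each
    // thin device is actually mapped.
    func printUsage(devices *devmapper.DeviceSet) {
        for _, id := range devices.List() {
            st, err := devices.GetDeviceStatus(id)
            if err != nil {
                continue // device may be mid-deletion; skip it
            }
            fmt.Printf("%s: %d of %d sectors mapped\n", id, st.MappedSectors, st.SizeInSectors)
        }
    }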
+func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + 
status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 + } + + return status +} + +// exportDeviceMetadata returns a device's metadata in an exportable form. +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. +func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + // Pick up initialization settings, if any were saved before + defaultsFile := path.Join(root, "defaults") + defaultsBytes, err := ioutil.ReadFile(defaultsFile) + defaults := []string{} + settings := map[string]string{} + if err == nil && len(defaultsBytes) > 0 { + defaults = strings.Split(string(defaultsBytes), "\n") + } + + foundBlkDiscard := false + nthOption := 0 + for _, option := range append(defaults, options...)
{ + nthOption = nthOption + 1 + if len(option) == 0 { + continue + } + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + default: + if nthOption > len(defaults) { + return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + } + logrus.Errorf("devmapper: Unknown option %s, ignoring\n", key) + } + settings[key] = val + } + + // By default, don't do the blk discard hack on raw devices; it's rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + // Save these settings along with the other metadata + defaults = []string{} + for key, val := range settings { + defaults = append(defaults, key+"="+val) + } + defaultsBytes = []byte(strings.Join(defaults, "\n") + "\n") + if err := ioutils.AtomicWriteFile(defaultsFile, defaultsBytes, 0600); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go new file mode 100644 index 00000000..9ab3e4f8 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@
+package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. +// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go new file mode 100644 index 00000000..1fc3a7b3 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -0,0 +1,226 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/docker/go-units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type 
Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
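Stepping back from the individual methods for a moment, a hedged sketch of constructing this driver through the Init function above; the dm.* keys are the ones parsed by NewDeviceSet earlier in this diff, while the home directory and pool name are hypothetical:

    package example

    import (
        graphdriver "github.com/containers/storage/drivers"
        "github.com/containers/storage/drivers/devmapper"
    )

    // newDevmapperDriver initializes the devicemapper graphdriver against a
    // pre-created thin pool, with deferred removal and deletion enabled.
    func newDevmapperDriver() (graphdriver.Driver, error) {
        opts := []string{
            "dm.thinpooldev=/dev/mapper/storage-thinpool", // hypothetical pool
            "dm.use_deferred_removal=true",
            "dm.use_deferred_deletion=true",
            "dm.fs=xfs",
        }
        return devmapper.Init("/var/lib/storage/devicemapper", opts, nil, nil)
    }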
+func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go new file mode 100644 index 00000000..cca1fe1b --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. 
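As a usage note for the helper below (the path is hypothetical): Mounted detects a mountpoint by comparing the st_dev of a directory with that of its parent rather than by parsing mount tables, so a sketch of calling it is simply:

    package example

    import "github.com/containers/storage/drivers/devmapper"

    // layerIsMounted relies on the device-number comparison performed by
    // Mounted: a mountpoint lives on a different device than its parent.
    func layerIsMounted() (bool, error) {
        return devmapper.Mounted("/var/lib/storage/devicemapper/mnt/abc123")
    }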
+func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go new file mode 100644 index 00000000..e267f618 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -0,0 +1,243 @@ +package graphdriver + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which chooses not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container.
+ CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and mountLabel. Parent and mountLabel may be "". + Create(id, parent, mountLabel string, storageOpt map[string]string) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g., unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // GetMetadata returns a set of key-value pairs which give low level information + // about the image/container the driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (archive.Archive, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver.
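The registration pattern, sketched ("exampledriver" and its InitFunc body are hypothetical; the built-in drivers register themselves exactly this way from their package init functions):

    package example

    import (
        graphdriver "github.com/containers/storage/drivers"
        "github.com/containers/storage/pkg/idtools"
    )

    func init() {
        // Register makes the driver selectable by name via GetDriver/New.
        graphdriver.Register("exampledriver", func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
            // A real driver would construct and return itself here.
            return nil, graphdriver.ErrNotSupported
        })
    }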
+func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + if pluginDriver, err := lookupPlugin(name, home, options); err == nil { + return pluginDriver, nil + } + logrus.Errorf("Failed to GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// New creates the driver and initializes it at the specified root. +func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver + return GetDriver(name, root, options, uidMaps, gidMaps) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver %q", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// isDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error.
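Tying the selection logic above together, a usage sketch (the root path is hypothetical): an empty name lets New prefer prior on-disk state and then probe the platform priority list, while a concrete name forces that driver.

    package example

    import graphdriver "github.com/containers/storage/drivers"

    // autoDetect lets New pick the driver: prior state under root wins,
    // then the priority list ("aufs", "btrfs", ..., "vfs" on Linux) is
    // probed in order.
    func autoDetect() (graphdriver.Driver, error) {
        return graphdriver.New("/var/lib/storage", "", nil, nil, nil)
    }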
+func isDriverNotSupported(err error) bool { + return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + driversMap[driver] = true + } + } + return driversMap +} diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go new file mode 100644 index 00000000..2891a84f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -0,0 +1,19 @@ +package graphdriver + +import "syscall" + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go new file mode 100644 index 00000000..2b421e32 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -0,0 +1,133 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/mount" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "devicemapper", + "overlay", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. 
+ FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// NewFsChecker returns a checker configured for the provided FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a checker that parses /proc/mountinfo to check +// if the specified path is mounted. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go new file mode 100644 index 00000000..29719ffa --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -0,0 +1,65 @@ +// +build solaris,cgo + +package graphdriver + +/* +#include <sys/statvfs.h> +#include <stdlib.h> + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "unsafe" + + log "github.com/Sirupsen/logrus" ) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return 0, nil +} + +// Mounted checks if the given path is mounted as the fs type +// Solaris supports only ZFS for now +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + + cs := C.CString(filepath.Dir(mountPath)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ...
] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return false, ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go new file mode 100644 index 00000000..4a875608 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd,!solaris + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go new file mode 100644 index 00000000..ffd30c29 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go new file mode 100644 index 00000000..0f5b4fe4 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -0,0 +1,157 @@ +package graphdriver + +import ( + "time" + + "github.com/Sirupsen/logrus" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + gdw := &NaiveDiffDriver{ + ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + return gdw +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "".
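As a usage note for the wrapper constructed above, a sketch (proto stands for any ProtoDriver implementation):

    package example

    import graphdriver "github.com/containers/storage/drivers"

    // wrap upgrades any ProtoDriver to a full Driver; Diff, Changes,
    // ApplyDiff and DiffSize are implemented generically on top of Get/Put.
    func wrap(proto graphdriver.ProtoDriver) graphdriver.Driver {
        return graphdriver.NewNaiveDiffDriver(proto, nil, nil)
    }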
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + layerFs, err := gdw.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + gdw.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + gdw.Put(id) + return err + }), nil + } + + parentFs, err := gdw.Get(parent, "") + if err != nil { + return nil, err + } + defer gdw.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + gdw.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + layerFs, err := gdw.Get(id, "") + if err != nil { + return nil, err + } + defer gdw.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = gdw.Get(parent, "") + if err != nil { + return nil, err + } + defer gdw.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := gdw.Get(id, "") + if err != nil { + return + } + defer gdw.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := gdw.Get(id, "") + if err != nil { + return + } + defer gdw.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/copy.go b/vendor/github.com/containers/storage/drivers/overlay/copy.go new file mode 100644 index 00000000..703d51a8 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/copy.go @@ -0,0 +1,169 @@ +// +build linux + +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" +) + +type copyFlags int + +const ( + copyHardlink copyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags copyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&copyHardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir.
+ if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go new file mode 100644 index 00000000..ec4aed46 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -0,0 +1,450 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + + "github.com/containers/storage/pkg/mount" + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from the naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") + backingFs = "" +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is an "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories.
The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with an empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + } +} + +// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. 
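+//
+// For illustration only (a sketch, not upstream documentation): for a child
+// layer "abc" whose parent "def" has a "root" directory, the layout this
+// produces under the driver home looks roughly like
+//
+//	<home>/abc/upper    - writable upper layer (mode copied from parent root)
+//	<home>/abc/work     - overlayfs work directory
+//	<home>/abc/merged   - mount point later populated by Get
+//	<home>/abc/lower-id - contains "def"
+//
+// where <home> and the layer ids are placeholders for whatever was passed
+// to Init and the real layer identifiers.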
+func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) { + + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + return rootDir, nil + } + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + var ( + lowerDir = path.Join(d.dir(string(lowerID)), "root") + upperDir = path.Join(dir, "upper") + workDir = path.Join(dir, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + ) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + err := syscall.Unmount(mountpoint, 0) + if err != nil { + rootDir := path.Join(d.dir(id), "root") + if _, err := os.Stat(rootDir); err == nil { + // We weren't mounting a "merged" directory anyway + return nil + } + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return err +} + +// ApplyDiff applies the new layer on top of the root, if parent does not exist with will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. 
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go new file mode 100644 index 00000000..3dbb4de4 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay2/mount.go b/vendor/github.com/containers/storage/drivers/overlay2/mount.go new file mode 100644 index 00000000..73bf9264 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay2/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay2 + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/containers/storage/pkg/reexec" +) + +func init() { + reexec.Register("docker-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: 0, + Label: label, + } + + cmd := reexec.Command("docker-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + } + return nil +} + +// mountfromMain is the entry-point for docker-mountfrom on re-exec. 
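+//
+// The parent side (mountFrom above) writes a single JSON-encoded
+// mountOptions value to this process's stdin; a hypothetical payload looks
+// like
+//
+//	{"Device":"overlay","Target":"abc/merged","Type":"overlay",
+//	 "Label":"lowerdir=l/X,upperdir=abc/diff,workdir=abc/work","Flag":0}
+//
+// and flag.Arg(0) names the directory to chdir into before mounting, which
+// keeps the relative paths in the option string short enough to fit in a
+// single page.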
+func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay2/overlay.go b/vendor/github.com/containers/storage/drivers/overlay2/overlay.go new file mode 100644 index 00000000..b9a1fa85 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay2/overlay.go @@ -0,0 +1,514 @@ +// +build linux + +package overlay2 + +import ( + "bufio" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/parsers/kernel" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. +// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// that mounts do not fail due to length. + +const ( + driverName = "overlay2" + linkDir = "l" + lowerFile = "lower" + maxDepth = 128 + + // idLength represents the number of random characters + // which can be used to create the unique link identifer + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that following equation + // is true (512 is a buffer for label metadata). 
+ // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + idLength = 26 +) + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +var backingFs = "" + +func init() { + graphdriver.Register(driverName, Init) +} + +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // require kernel 4.0.0 to ensure multiple lower dirs are supported + v, err := kernel.GetKernelVersion() + if err != nil { + return nil, err + } + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + return nil, graphdriver.ErrNotSupported + } + logrus.Warnf("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay2' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + } + + return d, nil +} + +type overlayOptions struct { + overrideKernelCheck bool +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "overlay2.override_kernel_check": + o.overrideKernelCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("overlay2: Unknown option %s\n", key) + } + } + return o, nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return driverName +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + } +} + +// GetMetadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) { + + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + // if no parent directory, done + if parent == "" { + return nil + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := ioutil.ReadFile(path.Join(parentDir, 
"link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) + } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lp, err := os.Readlink(path.Join(d.home, s)) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, just return diff directory + if os.IsNotExist(err) { + return diffDir, nil + } + return "", err + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + + workDir := path.Join(dir, "work") + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + mountLabel = label.FormatMountLabel(opts, mountLabel) + if len(mountLabel) > syscall.Getpagesize() { + return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel)) + } + + if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. 
+func (d *Driver) Put(id string) error { + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + err := syscall.Unmount(mountpoint, 0) + if err != nil { + if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil { + // We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway + return nil + } + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return err +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }); err != nil { + return 0, err + } + + return d.DiffSize(id, parent) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id, parent string) (archive.Archive, error) { + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay2/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay2/overlay_unsupported.go new file mode 100644 index 00000000..e5ac4ca8 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/vendor/github.com/containers/storage/drivers/overlay2/randomid.go b/vendor/github.com/containers/storage/drivers/overlay2/randomid.go new file mode 100644 index 00000000..af5cb659 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay2/randomid.go @@ -0,0 +1,80 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. 
Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/containers/storage/drivers/plugin.go b/vendor/github.com/containers/storage/drivers/plugin.go new file mode 100644 index 00000000..a76aae6e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/plugin.go @@ -0,0 +1,32 @@ +// +build experimental + +package graphdriver + +import ( + "fmt" + "io" + + "github.com/containers/storage/pkg/plugins" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. 
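+	// (Illustrative only: a proxy invocation is typically of the form
+	//	client.Call("GraphDriver.Create", args, &ret)
+	// with args JSON-encoded on the wire and the reply decoded into ret;
+	// see proxy.go for the concrete request/response types.)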
+ Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + pl, err := plugins.Get(name, "GraphDriver") + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, home, opts, pl.Client()) +} + +func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { + proxy := &graphDriverProxy{name, c} + return proxy, proxy.Init(home, opts) +} diff --git a/vendor/github.com/containers/storage/drivers/plugin_unsupported.go b/vendor/github.com/containers/storage/drivers/plugin_unsupported.go new file mode 100644 index 00000000..daa7a170 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/plugin_unsupported.go @@ -0,0 +1,7 @@ +// +build !experimental + +package graphdriver + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + return nil, ErrNotSupported +} diff --git a/vendor/github.com/containers/storage/drivers/proxy.go b/vendor/github.com/containers/storage/drivers/proxy.go new file mode 100644 index 00000000..1bab2ef3 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/proxy.go @@ -0,0 +1,225 @@ +// +build experimental + +package graphdriver + +import ( + "errors" + "fmt" + + "github.com/containers/storage/pkg/archive" +) + +type graphDriverProxy struct { + name string + client pluginClient +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string +} + +func (d *graphDriverProxy) Init(home string, opts []string) error { + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + 
if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return ret.Dir, err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.client.Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return archive.Archive(body), nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go new file mode 100644 index 00000000..7743dced --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -0,0 +1,8 @@ +// 
+build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/containers/storage/drivers/aufs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go new file mode 100644 index 00000000..40ff1cdd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/containers/storage/drivers/btrfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go new file mode 100644 index 00000000..08ac984b --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/containers/storage/drivers/devmapper" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go new file mode 100644 index 00000000..3b6c5f85 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/containers/storage/drivers/overlay" + _ "github.com/containers/storage/drivers/overlay2" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/github.com/containers/storage/drivers/register/register_vfs.go new file mode 100644 index 00000000..691ce859 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/containers/storage/drivers/vfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_windows.go b/vendor/github.com/containers/storage/drivers/register/register_windows.go new file mode 100644 index 00000000..048b2709 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/containers/storage/drivers/windows" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go new file mode 100644 index 00000000..c748468e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/containers/storage/drivers/zfs" +) diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go new file mode 100644 index 00000000..36a975e3 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -0,0 +1,145 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/idtools" + + 
"github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the directory for the given id. 
+func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS new file mode 100644 index 00000000..9c270c54 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go new file mode 100644 index 00000000..16426337 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -0,0 +1,405 @@ +// +build linux freebsd solaris + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" + "github.com/opencontainers/runc/libcontainer/label" +) + +type zfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +// Logger returns a zfs logger implementation. +type Logger struct{} + +// Log wraps log message from ZFS driver with a prefix '[zfs]'. +func (*Logger) Log(cmd []string) { + logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +// Init returns a new ZFS driver. +// It takes base mount path and an array of options which are represented as key value pairs. +// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. 
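+//
+// For example (the dataset name is hypothetical), a caller might use
+//
+//	driver, err := Init("/var/lib/storage/zfs",
+//		[]string{"zfs.fsname=tank/containers"}, nil, nil)
+//
+// If zfs.fsname is omitted, the dataset is discovered from the mount table
+// entry backing the base directory's parent (see lookupZfsDataset below).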
+func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + logrus.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) + if err != nil { + logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + if err := mount.MakePrivate(base); err != nil { + return nil, err + } + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. +type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem. 
It returns a two dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. +// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'. +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// GetMetadata returns image/container metadata related to graph driver +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
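+//
+// As a sketch, layer creation maps onto ZFS operations roughly as follows
+// (dataset names are examples only):
+//
+//	zfs snapshot tank/containers/<parent>@<nanos>
+//	zfs clone -o mountpoint=legacy tank/containers/<parent>@<nanos> tank/containers/<id>
+//	zfs destroy -d tank/containers/<parent>@<nanos>
+//
+// falling back to a plain "zfs create" when parent is empty; see create
+// below and cloneFilesystem above.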
+func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error { + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. 
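+//
+// (Note, for illustration: once the reference count drops to zero, Put only
+// unmounts if graphdriver.Mounted reports a live zfs mount, so a Get that
+// failed before mounting does not trigger a spurious unmount error.)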
+func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) + if err != nil || !mounted { + return err + } + + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + err = mount.Unmount(mountpoint) + if err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return err +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] == true +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go new file mode 100644 index 00000000..a25e2a45 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/containers/storage/drivers" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go new file mode 100644 index 00000000..6aa41d90 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -0,0 +1,27 @@ +package zfs + +import ( + "fmt" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/containers/storage/drivers" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go new file mode 100644 index 00000000..56d09cac --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go @@ -0,0 +1,59 @@ +// +build solaris,cgo + +package zfs + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "strings" + "unsafe" + + log "github.com/Sirupsen/logrus" + "github.com/containers/storage/drivers" +) + +func checkRootdirFs(rootdir string) error { + + cs := C.CString(filepath.Dir(rootdir)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 
's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + C.free(unsafe.Pointer(buf)) + return graphdriver.ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return nil +} + +/* rootfs is introduced to comply with the OCI spec +which states that root filesystem must be mounted at /rootfs/ instead of / +*/ +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") + } + + return filepath.Join(id[:maxlen], "rootfs", "root") +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go new file mode 100644 index 00000000..ce8daada --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go new file mode 100644 index 00000000..98197a02 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -0,0 +1,1147 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/promise" + "github.com/containers/storage/pkg/system" +) + +type ( + // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. + Archive io.ReadCloser + // Reader is a type of io.Reader. + Reader io.Reader + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + // TarChownOptions wraps the chown options UID and GID. + TarChownOptions struct { + UID, GID int + } + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *TarChownOptions + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. 
Also, to facilitate the passing of + // specific id mappings for untar, an archiver can be created with maps + // which will then be passed to Untar operations + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) + +var ( + // ErrNotImplemented is the error message of function not implemented. + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} +) + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.Command(args[0], args[1:]...), archive) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+		// See Issue 18170
+		return nil, err
+	}
+
+	compression := DetectCompression(bs)
+	switch compression {
+	case Uncompressed:
+		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+		return readBufWrapper, nil
+	case Gzip:
+		gzReader, err := gzip.NewReader(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+		return readBufWrapper, nil
+	case Bzip2:
+		bz2Reader := bzip2.NewReader(buf)
+		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+		return readBufWrapper, nil
+	case Xz:
+		xzReader, chdone, err := xzDecompress(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+			<-chdone
+			return readBufWrapper.Close()
+		}), nil
+	default:
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	}
+}
+
+// CompressStream compresses the dest with specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+	p := pools.BufioWriter32KPool
+	buf := p.Get(dest)
+	switch compression {
+	case Uncompressed:
+		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+		return writeBufWrapper, nil
+	case Gzip:
+		gzWriter := gzip.NewWriter(dest)
+		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+		return writeBufWrapper, nil
+	case Bzip2, Xz:
+		// archive/bzip2 does not support writing, and there is no xz support at all
+		// However, this is not a problem as docker only currently generates gzipped tars
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	default:
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	}
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+	switch *compression {
+	case Uncompressed:
+		return "tar"
+	case Bzip2:
+		return "tar.bz2"
+	case Gzip:
+		return "tar.gz"
+	case Xz:
+		return "tar.xz"
+	}
+	return ""
+}
+
+type tarWhiteoutConverter interface {
+	ConvertWrite(*tar.Header, string, os.FileInfo) error
+	ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+	TarWriter *tar.Writer
+	Buffer    *bufio.Writer
+
+	// for hardlink mapping
+	SeenFiles map[uint64]string
+	UIDMaps   []idtools.IDMap
+	GIDMaps   []idtools.IDMap
+
+	// For packing and unpacking whiteout files in the
+	// non standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+	WhiteoutConverter tarWhiteoutConverter
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
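The compression helpers above are symmetric, which is easiest to see in a round trip. A usage sketch, assuming the package is imported from its vendored path and using an arbitrary payload:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Compress into a buffer, sniff the magic bytes, decompress back.
	var buf bytes.Buffer
	w, err := archive.CompressStream(&buf, archive.Gzip)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello layer")); err != nil {
		panic(err)
	}
	w.Close()

	// DetectCompression needs only the leading magic bytes.
	fmt.Println(archive.DetectCompression(buf.Bytes()) == archive.Gzip) // true

	r, err := archive.DecompressStream(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()
	data, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", data) // hello layer
}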
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { + uid, gid, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if err != nil { + return err + } + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + hdr.Gid = xGID + } + + if ta.WhiteoutConverter != nil { + if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil { + return err + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. 
+		// In that case we just want to merge the two
+		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+
+	case tar.TypeReg, tar.TypeRegA:
+		// Source is regular file
+		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+		if err != nil {
+			return err
+		}
+		if _, err := io.Copy(file, reader); err != nil {
+			file.Close()
+			return err
+		}
+		file.Close()
+
+	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeLink:
+		targetPath := filepath.Join(extractDir, hdr.Linkname)
+		// check for hardlink breakout
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+		}
+		if err := os.Link(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		// path -> hdr.Linkname = targetPath
+		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+		// that symlink would first have to be created, which would be caught earlier, at this very check:
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+		}
+		if err := os.Symlink(hdr.Linkname, path); err != nil {
+			return err
+		}
+
+	case tar.TypeXGlobalHeader:
+		logrus.Debug("PAX Global Extended Headers found and ignored")
+		return nil
+
+	default:
+		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
+	}
+
+	// Lchown is not supported on Windows.
+	if Lchown && runtime.GOOS != "windows" {
+		if chownOpts == nil {
+			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
+		}
+		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+			return err
+		}
+	}
+
+	var errors []string
+	for key, value := range hdr.Xattrs {
+		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+			if err == syscall.ENOTSUP {
+				// We ignore errors here because not all graphdrivers support
+				// xattrs *cough* old versions of AUFS *cough*. However only
+				// ENOTSUP should be emitted in that case, otherwise we still
+				// bail.
+				errors = append(errors, err.Error())
+				continue
+			}
+			return err
+		}
+
+	}
+
+	if len(errors) > 0 {
+		logrus.WithFields(logrus.Fields{
+			"errors": errors,
+		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+	}
+
+	// There is no LChmod, so ignore mode for symlink. Also, this
+	// must happen after chown, as that can modify the file mode
+	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+		return err
+	}
+
+	aTime := hdr.AccessTime
+	if aTime.Before(hdr.ModTime) {
+		// Last access time should never be before last modified time.
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (eg !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !exceptions { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range patterns { + if pat[0] != '!' { + continue + } + pat = pat[1:] + string(filepath.Separator) + if strings.HasPrefix(pat, dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
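From the caller's side, the walk logic above boils down to a handful of TarOptions fields. A usage sketch; the context directory path is hypothetical:

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Archive a directory, pruning .git and *.tmp; an IncludeFiles entry
	// that names a path exactly wins over ExcludePatterns for that path.
	rdr, err := archive.TarWithOptions("/tmp/ctx", &archive.TarOptions{
		Compression:     archive.Gzip,
		IncludeFiles:    []string{"."},
		ExcludePatterns: []string{".git", "*.tmp"},
	})
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	n, err := io.Copy(ioutil.Discard, rdr) // drain the piped stream
	fmt.Println(n, err)
}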
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+	tr := tar.NewReader(decompressedArchive)
+	trBuf := pools.BufioReader32KPool.Get(nil)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return err
+	}
+	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+
+	// Iterate through the files in the archive.
+loop:
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		// Normalize name, for safety and for a simple is-root check
+		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+		// This keeps "..\" as-is, but normalizes "\..\" to "\".
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		for _, exclude := range options.ExcludePatterns {
+			if strings.HasPrefix(hdr.Name, exclude) {
+				continue loop
+			}
+		}
+
+		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+		// the filepath format for the OS on which the daemon is running. Hence
+		// the check for a slash-suffix MUST be done in an OS-agnostic way.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+
+		// If path exists we almost always just want to remove and replace it.
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing directory with a non-directory from the archive.
+				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+			}
+
+			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing non-directory with a directory from the archive.
+				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+			}
+
+			if fi.IsDir() && hdr.Name == "." {
+				continue
+			}
+
+			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+				if err := os.RemoveAll(path); err != nil {
+					return err
+				}
+			}
+		}
+		trBuf.Reset(tr)
+
+		// if the options contain a uid & gid maps, convert header uid/gid
+		// entries using the maps such that lchown sets the proper mapped
+		// uid/gid after writing the file. We only perform this mapping if
+		// the file isn't already owned by the remapped root UID or GID, as
+		// that specific uid/gid has no mapping from container -> host, and
+		// those files already have the proper ownership for inside the
+		// container.
+ if hdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if hdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. 
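The unpack side needs even less ceremony, since Untar sniffs the compression through DecompressStream and the same call therefore accepts .tar, .tar.gz, .tar.bz2, and .tar.xz input. A sketch with hypothetical paths:

package main

import (
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// NoLchown skips the chown step, useful when running unprivileged.
	opts := &archive.TarOptions{NoLchown: os.Geteuid() != 0}
	if err := archive.Untar(f, "/tmp/rootfs", opts); err != nil {
		panic(err)
	}
}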
+func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+		if err != nil {
+			return err
+		}
+
+		// only perform mapping if the file being copied isn't already owned by the
+		// uid or gid of the remapped root in the container
+		if remappedRootUID != hdr.Uid {
+			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Uid = xUID
+		}
+		if remappedRootGID != hdr.Gid {
+			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Gid = xGID
+		}
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		// only surface the writer goroutine's error if Untar itself succeeded
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	err = archiver.Untar(r, filepath.Dir(dst), nil)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is done in an operating-system-specific manner,
+// depending on where the daemon is running. If `dst` ends with a
+// trailing slash the final destination path will be `dst/base(src)`
+// (Linux) or `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+	return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	chdone := make(chan struct{})
+	cmd.Stdin = input
+	pipeR, pipeW := io.Pipe()
+	cmd.Stdout = pipeW
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+
+	// Run the command and return the pipe
+	if err := cmd.Start(); err != nil {
+		return nil, nil, err
+	}
+
+	// Copy stdout to the returned pipe
+	go func() {
+		if err := cmd.Wait(); err != nil {
+			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+		} else {
+			pipeW.Close()
+		}
+		close(chdone)
+	}()
+
+	return pipeR, chdone, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+	f, err := ioutil.TempFile(dir, "")
+	if err != nil {
+		return nil, err
+	}
+	if _, err := io.Copy(f, src); err != nil {
+		return nil, err
+	}
+	if _, err := f.Seek(0, 0); err != nil {
+		return nil, err
+	}
+	st, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	size := st.Size()
+	return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive.
The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 00000000..e944ca2a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,91 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return err + } + if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + *hdr = tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return nil +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff 
--git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 00000000..54acbf28 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 00000000..19d731fd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,112 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
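The overlay whiteout converter above ultimately reduces to a naming convention: a deletion travels through the tar stream as an empty entry whose base name carries the AUFS whiteout prefix. A standalone sketch of that rename; WhiteoutPrefix is restated here because the whiteout constants live in a file not shown in this diff, ".wh." being the conventional AUFS value:

package main

import (
	"fmt"
	"path/filepath"
)

// WhiteoutPrefix mirrors the archive package's constant (".wh.", the
// AUFS convention); restated so this sketch stands alone.
const WhiteoutPrefix = ".wh."

// aufsWhiteout performs the rename ConvertWrite applies to an overlay
// 0:0 character device that marks a deletion.
func aufsWhiteout(name string) string {
	dir, file := filepath.Split(name)
	return filepath.Join(dir, WhiteoutPrefix+file)
}

func main() {
	fmt.Println(aufsWhiteout("etc/passwd")) // etc/.wh.passwd
}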
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 00000000..828d3b9d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + // no notion of file ownership mapping yet on Windows + return 0, 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go new file mode 100644 index 00000000..234b0a68 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -0,0 +1,446 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. 
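Change values print as one-letter kinds followed by the path. A self-contained illustration, with the types restated so it runs on its own:

package main

import "fmt"

// ChangeType and Change restate the archive package's types so this
// sketch stands alone; the kinds print as C (modify), A (add), D (delete).
type ChangeType int

const (
	ChangeModify ChangeType = iota
	ChangeAdd
	ChangeDelete
)

func (c ChangeType) String() string {
	return [...]string{"C", "A", "D"}[c]
}

// Change pairs a path with the kind of change observed for it.
type Change struct {
	Path string
	Kind ChangeType
}

func main() {
	for _, c := range []Change{
		{Path: "/etc/hosts", Kind: ChangeModify},
		{Path: "/opt/app", Kind: ChangeAdd},
		{Path: "/var/log/old.log", Kind: ChangeDelete},
	} {
		fmt.Printf("%s %s\n", c.Kind, c.Path) // e.g. "C /etc/hosts"
	}
}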
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. 
The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. 
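Taken together, a change set computed by ChangesDirs can be exported as a layer archive in which deletions become whiteout entries. A usage sketch with hypothetical directories; passing nil ID maps means no UID/GID remapping:

package main

import (
	"io"
	"io/ioutil"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Diff two snapshots, then serialize the result as a layer tar:
	// deletions become .wh. whiteout entries, adds and modifies are
	// read from the new tree.
	changes, err := archive.ChangesDirs("/tmp/layer-new", "/tmp/layer-old")
	if err != nil {
		panic(err)
	}
	layer, err := archive.ExportChanges("/tmp/layer-new", changes, nil, nil)
	if err != nil {
		panic(err)
	}
	defer layer.Close()
	io.Copy(ioutil.Discard, layer) // drain the piped stream
}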
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go new file mode 100644 index 00000000..798d7bfc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -0,0 +1,312 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. 
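The pruning trick the comment above describes rests on comparing inode numbers cheaply. A small sketch of the underlying check, with hypothetical paths; the real walker obtains the inodes in bulk from getdents rather than per-file Lstat:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// ino returns the inode number of path, the key the walker compares to
// decide whether a subtree can be skipped without recursing into it.
func ino(path string) (uint64, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return 0, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("no Stat_t for %s", path)
	}
	return uint64(st.Ino), nil
}

func main() {
	// When the same name under both roots resolves to the same inode
	// (and device), the trees share the entry and it cannot differ,
	// so the walker prunes the whole subtree.
	i1, err1 := ino("/tmp/dir1/shared")
	i2, err2 := ino("/tmp/dir2/shared")
	if err1 == nil && err2 == nil && i1 == i2 {
		fmt.Println("identical inode: prune subtree")
	} else {
		fmt.Println("descend and compare")
	}
}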
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 00000000..e1d1e7a9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+	if runtime.GOOS == "windows" {
+		if strings.HasPrefix(relPath, `\\`) {
+			relPath = relPath[1:]
+		}
+	}
+
+	if relPath == string(os.PathSeparator) {
+		return nil
+	}
+
+	parent := root.LookUp(filepath.Dir(relPath))
+	if parent == nil {
+		return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+	}
+
+	info := &FileInfo{
+		name:     filepath.Base(relPath),
+		children: make(map[string]*FileInfo),
+		parent:   parent,
+	}
+
+	s, err := system.Lstat(path)
+	if err != nil {
+		return err
+	}
+	info.stat = s
+
+	info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+	parent.children[info.name] = info
+
+	return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return root, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
new file mode 100644
index 00000000..43dd94e2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/containers/storage/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size for dirs, it's not a good measure of change
+		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
new file mode 100644
index 00000000..06eadd66
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+	"os"
+
+	"github.com/containers/storage/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.ModTime() != newStat.ModTime() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
new file mode 100644
index 00000000..05c243d3
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -0,0 +1,458 @@
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/pkg/system"
+)
+
+// Errors used or returned by this file.
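
These sentinels, declared just below, are what callers compare against. A minimal sketch with hypothetical paths:

package main

import "github.com/containers/storage/pkg/archive"

func main() {
	// Hypothetical: /dst/file already exists as a regular file.
	err := archive.CopyResource("/src/dir", "/dst/file", false)
	if err == archive.ErrCannotCopyDir {
		println("refusing: a directory cannot overwrite an existing file")
	}
}
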
+var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content Archive, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
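
Before the implementation continues, a small illustration of the path helpers above (Unix path semantics assumed):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	dir, base := archive.SplitPathDirEntry("/foo/bar/.")
	fmt.Println(dir, base) // "/foo/bar" "."

	// filepath.Clean drops a trailing separator; the helper restores it.
	fmt.Println(archive.PreserveTrailingDotOrSeparator(filepath.Clean("/foo/bar/"), "/foo/bar/"))
	// "/foo/bar/"
}
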
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between it's directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. 
+ dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. 
In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symlinks should be followed: if followLink is true, resolvedPath is
+// the link target of any symlinked file; otherwise only symlinks in the
+// parent directory are resolved, and a symlinked file itself is returned
+// without being resolved.
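
Taken together, the public entry point is a single call. A minimal sketch with hypothetical paths:

package main

import (
	"log"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Copies host directory /src/data into /dst/, following symlinks in the
	// source path, similar in spirit to `cp -a /src/data /dst/`.
	if err := archive.CopyResource("/src/data", "/dst/", true); err != nil {
		log.Fatal(err)
	}
}
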
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. + rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go new file mode 100644 index 00000000..e305b5e4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go new file mode 100644 index 00000000..2b775b45 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go new file mode 100644 index 00000000..eba71738 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -0,0 +1,279 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. 
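
For orientation, a minimal caller-side sketch (tar path and destination are hypothetical; an uncompressed layer tar is assumed):

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// nil options are accepted; UnpackLayer substitutes defaults.
	n, err := archive.UnpackLayer("/var/lib/store/layers/abc", f, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes", n)
}
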
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + if options == nil { + options = &TarOptions{} + } + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. 
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if srcHdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) + if err != nil { + return 0, err + } + srcHdr.Uid = xUID + } + if srcHdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) + if err != nil { + return 0, err + } + srcHdr.Gid = xGID + } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. 
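
The whiteout handling above means a deletion travels as a specially named empty file. A hedged sketch that builds a one-entry layer in memory (destination path hypothetical):

package main

import (
	"archive/tar"
	"bytes"
	"log"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	// An entry named etc/.wh.passwd instructs UnpackLayer to remove
	// etc/passwd in the destination before continuing.
	if err := tw.WriteHeader(&tar.Header{Name: "etc/.wh.passwd", Mode: 0600}); err != nil {
		log.Fatal(err)
	}
	tw.Close()

	if _, err := archive.UnpackLayer("/var/lib/store/layers/abc", &buf, nil); err != nil {
		log.Fatal(err)
	}
}
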
+func ApplyLayer(dest string, layer Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go new file mode 100644 index 00000000..ef339446 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/containers/storage/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)")
+		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	log.Out = os.Stderr
+	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	var newDir, oldDir string
+
+	if len(*flNewDir) == 0 {
+		var err error
+		newDir, err = ioutil.TempDir("", "docker-test-newDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(newDir)
+		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		newDir = *flNewDir
+	}
+
+	if len(*flOldDir) == 0 {
+		var err error
+		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+	changes, err := archive.ChangesDirs(newDir, oldDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	a, err := archive.ExportChanges(newDir, changes, nil, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer a.Close()
+
+	i, err := io.Copy(os.Stdout, a)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go
new file mode 100644
index 00000000..3448569b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	if time.IsZero() {
+		// Return UTIME_OMIT special value
+		ts.Sec = 0
+		ts.Nsec = ((1 << 30) - 2)
+		return
+	}
+	return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
new file mode 100644
index 00000000..e85aac05
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	nsec := int64(0)
+	if !time.IsZero() {
+		nsec = time.UnixNano()
+	}
+	return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go
new file mode 100644
index 00000000..d20478a1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+ +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go new file mode 100644 index 00000000..dfb335c0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go new file mode 100644 index 00000000..649575c0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. 
+// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go new file mode 100644 index 00000000..5b26a520 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. 
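
The re-exec mechanism assumed here comes from the vendored reexec package: entry points are registered under well-known names at init time (see init_unix.go later in this patch), and the embedding binary gives reexec the first chance to run. A minimal sketch of the host program's side, assuming the vendored reexec matches Docker's Register/Init API:

package main

import "github.com/containers/storage/pkg/reexec"

func main() {
	// When the process was started as "docker-untar" or "docker-applyLayer",
	// reexec.Init runs the registered entry point and returns true.
	if reexec.Init() {
		return
	}
	// ... normal startup continues here ...
}
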
+func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go new file mode 100644 index 00000000..93fde422 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 00000000..54b5ff48 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,103 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/mount" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. 
+// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + if err := mount.MakeRPrivate(path); err != nil { + return err + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + + if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = fmt.Errorf("error unmounting root: %v", errCleanup) + } + return + } + }() + + if err := syscall.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 00000000..16354bf6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "syscall" + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go 
b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
new file mode 100644
index 00000000..377aeb9f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go
@@ -0,0 +1,19 @@
+package chrootarchive
+
+import "github.com/containers/storage/pkg/archive"
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
+	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
new file mode 100644
index 00000000..23daa721
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -0,0 +1,120 @@
+//+build !windows
+
+package chrootarchive
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/reexec"
+	"github.com/containers/storage/pkg/system"
+)
+
+type applyLayerResponse struct {
+	LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence no point sandboxing
+// through chroot and rexec.
+func applyLayer() {
+
+	var (
+		tmpDir  = ""
+		err     error
+		options *archive.TarOptions
+	)
+	runtime.LockOSThread()
+	flag.Parse()
+
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	defer system.Umask(oldmask)
+	if err != nil {
+		fatal(err)
+	}
+
+	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+		fatal(err)
+	}
+
+	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
+		fatal(err)
+	}
+
+	os.Setenv("TMPDIR", tmpDir)
+	size, err := archive.UnpackLayer("/", os.Stdin, options)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		fatal(err)
+	}
+
+	encoder := json.NewEncoder(os.Stdout)
+	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+	}
+
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
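
From the storage side, usage is one call; the chroot into dest happens in the re-exec'ed child, so a hostile layer cannot escape it. A minimal sketch with hypothetical paths and an uncompressed layer stream:

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/chrootarchive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	size, err := chrootarchive.ApplyLayer("/var/lib/store/layers/abc", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("layer applied, %d bytes", size)
}
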
+func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go new file mode 100644 index 00000000..cfada9b1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,44 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + } + + return s, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000..7b8a46f3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/storage/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go new file mode 100644 index 00000000..fa17c9bf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go new file mode 100644 index 00000000..838b06af --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -0,0 +1,807 @@ +// +build linux + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// DevmapperLogger defines methods for logging with devicemapper. +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. 
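+//
+// These are sentinel values, so callers compare against them directly; a
+// hypothetical caller, for illustration:
+//
+//	if err := RemoveDevice(name); err == ErrBusy {
+//		// device is still open somewhere; retry, or use RemoveDeviceDeferred
+//	}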
+var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIDExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") +) + +var ( + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl + // command to execute. + Task struct { + unmanaged *cdmTask + } + // Deps represents dependents (layer) of a device. + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + // Info represents information about a device. + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + // TaskType represents a type of task + TaskType int + // AddNodeType represents a type of node to be added + AddNodeType int +) + +// DeviceIDExists returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. 
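+//
+// A hypothetical caller retrying creation with a fresh ID, as the
+// CreateDevice and CreateSnapDevice comments below suggest:
+//
+//	err := CreateDevice(poolName, deviceID)
+//	for DeviceIDExists(err) {
+//		deviceID++ // that id is already taken in the pool; try the next one
+//		err = CreateDevice(poolName, deviceID)
+//	}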
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, &params), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
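+//
+// The pattern used throughout this file is to set a cookie on a task before
+// running it and then wait on that cookie once udev has processed the event:
+//
+//	var cookie uint
+//	if err := task.setCookie(&cookie, 0); err != nil {
+//		return err
+//	}
+//	defer UdevWait(&cookie)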
+func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + return ErrUdevWait + } + return nil +} + +// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +// LogInit initializes the logger for the device mapper library. +func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev +// +// This is essential otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(&cookie) + + dmSawBusy = false // reset before the task is run + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + if err = task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// CancelDeferredRemove cancels a deferred remove for a device. 
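+//
+// The possible outcomes can be told apart by sentinel error (a sketch, not
+// something this package prescribes):
+//
+//	switch err := CancelDeferredRemove(name); err {
+//	case nil: // deferred remove cancelled
+//	case ErrBusy: // deletion already in progress
+//	case ErrEnxio: // device already gone
+//	}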
+func CancelDeferredRemove(deviceName string) error { + task, err := TaskCreateNamed(deviceTargetMsg, deviceName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + dmSawEnxio = false + if err := task.run(); err != nil { + // A device might be being deleted already + if dmSawBusy { + return ErrBusy + } else if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) + + } + return nil +} + +// GetBlockDeviceSize returns the size of a block device identified by the specified file. +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +// BlockDeviceDiscard runs discard for the given path. +// This is used as a workaround for the kernel not discarding block so +// on the thin pool when we remove a thinp device, so we do it +// manually +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. + syscall.Sync() + + return nil +} + +// CreatePool is the programmatic example of "dmsetup create". +// It creates a device with the specified poolName, data and metadata file and block size. +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + var cookie uint + var flags uint16 + flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.setCookie(&cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) + } + + return nil +} + +// ReloadPool is the programmatic example of "dmsetup reload". +// It reloads the table with the specified poolName, data and metadata file and block size. 
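+//
+// Both CreatePool and ReloadPool build the same "thin-pool" target line,
+// which (if my reading of the params string above is right) corresponds to:
+//
+//	0 <size/512> thin-pool <metadataFile> <dataFile> <poolBlockSize> 32768 1 skip_block_zeroing
+//
+// where 32768 is the low-water mark in blocks and skip_block_zeroing is the
+// single feature flag.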
+func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. 
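+//
+// A hypothetical caller unpacking the single-target result:
+//
+//	start, length, targetType, params, err := GetTable(name)
+//	// for a device activated by this package, targetType is "thin" and
+//	// params is "<poolName> <deviceID>", as built in activateDevice below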
+func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. 
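+//
+// As with RemoveDevice, ErrBusy signals that the device is still in use; a
+// hypothetical caller:
+//
+//	if err := DeleteDevice(poolName, deviceID); err == ErrBusy {
+//		// still open somewhere; back off and retry
+//	}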
+func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + if doSuspend { + ResumeDevice(baseName) + } + return err + } + + if err := task.setSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) + + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go new file mode 100644 index 00000000..8477e36f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go @@ -0,0 +1,35 @@ +// +build linux + +package devicemapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +// DevmapperLogCallback exports the devmapper log callback for cgo. +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + + if strings.Contains(msg, "No such device or address") { + dmSawEnxio = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) + } +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 00000000..91fbc85b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,251 @@ +// +build linux + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include <libdevmapper.h> +#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions.
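+//
+// Each name below is a package-level variable bound to the cgo-backed *Fct
+// implementation later in this file; keeping them as variables rather than
+// plain functions means a test inside this package could, in principle,
+// stub one out:
+//
+//	// hypothetical, package-internal test code:
+//	DmTaskRun = func(task *cdmTask) int { return 1 } // pretend success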
+var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device = append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + 
info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 00000000..dc361eab --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,34 @@ +// +build linux,!libdm_no_deferred_remove + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include <libdevmapper.h> +*/ +import "C" + +// LibraryDeferredRemovalSupport is supported when statically linked.
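+//
+// This file is built only under "linux,!libdm_no_deferred_remove"; the
+// companion file below carries the inverse tag, so exactly one definition of
+// LibraryDeferredRemovalSupport and of these two functions is compiled in:
+//
+//	// +build linux,!libdm_no_deferred_remove   (this file)
+//	// +build linux,libdm_no_deferred_remove    (the stub file)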
+const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 00000000..4a6665de --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalsupport is not supported when statically linked. +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go new file mode 100644 index 00000000..581b57eb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -0,0 +1,27 @@ +// +build linux + +package devicemapper + +import ( + "syscall" + "unsafe" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go new file mode 100644 index 00000000..cee5e545 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go new file mode 100644 index 00000000..1715ef45 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + 
return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go new file mode 100644 index 00000000..397251bd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go new file mode 100644 index 00000000..6fb0917c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..763d8d27 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -0,0 +1,283 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// exclusion returns true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' 
+} + +// empty returns true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on it's own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doesn't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := regexpMatch(pattern, file) + if err != nil { + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), + strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). 
We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm. +// +// As per the comment in golangs filepath.Match, on Windows, escaping +// is disabled. Instead, '\\' is treated as path separator. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if strings.Index(".$", string(ch)) != -1 { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + res, err := regexp.MatchString(regStr, path) + + // Map regexp's error to filepath's so no one knows we're not using filepath + if err != nil { + err = filepath.ErrBadPattern + } + + return res, err +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. 
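+//
+// A hypothetical call, resolving a symlinked storage root:
+//
+//	real, err := ReadSymlinkedDirectory("/var/lib/somelink")
+//	// err is non-nil if the canonical target is not a directory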
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 00000000..ccd648fa --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 00000000..0f2cb7ab --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000..d5c3abf5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. 
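+//
+// e.g. a hypothetical health check: n := GetTotalUsedFds() // -1 on error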
+func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000..5ec21cac --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000..8154e83f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go new file mode 100644 index 00000000..6bca4662 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -0,0 +1,197 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. 
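+//
+// For example (illustrative path and ids), this behaves like
+// os.MkdirAll(path, 0755) followed by a chown of every component to
+// 1000:1000, whether or not the component already existed:
+//
+//	err := MkdirAllAs("/var/lib/mydata/a/b", 0755, 1000, 1000)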
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func ToHost(contID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return contID, nil
+	}
+	for _, m := range idMap {
+		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+			hostID := m.HostID + (contID - m.ContainerID)
+			return hostID, nil
+		}
+	}
+	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// CreateIDMappings takes a requested user and group name and,
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
+	subuidRanges, err := parseSubuid(username)
+	if err != nil {
+		return nil, nil, err
+	}
+	subgidRanges, err := parseSubgid(groupname)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(subuidRanges) == 0 {
+		return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
+	}
+	if len(subgidRanges) == 0 {
+		return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+	}
+
+	return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+	idMap := []IDMap{}
+
+	// sort the ranges by lowest ID first
+	sort.Sort(subidRanges)
+	containerID := 0
+	for _, idrange := range subidRanges {
+		idMap = append(idMap, IDMap{
+			ContainerID: containerID,
+			HostID:      idrange.Start,
+			Size:        idrange.Length,
+		})
+		containerID = containerID + idrange.Length
+	}
+	return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+	return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+	return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username.
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go new file mode 100644 index 00000000..b2cfb05e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package idtools + +import ( + "os" + "path/filepath" + + "github.com/containers/storage/pkg/system" +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000..0cad1736 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
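+// The ownerUID and ownerGID arguments are accepted only to keep the common
+// mkdirAs signature; they are ignored on this platform.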
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000..4a4aaed0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,188 @@ +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
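+//
+// Usage sketch (editorial illustration, not part of the vendored source;
+// "dremap" is a hypothetical user name):
+//
+//	uid, gid, err := idtools.AddNamespaceRangesUser("dremap")
+//	if err != nil {
+//		return -1, -1, err
+//	}
+//	// uid and gid now own 65536-wide ranges in /etc/subuid and /etc/subgid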
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000..d98b354c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go new file mode 100644 index 00000000..3d737b3e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fmt.go b/vendor/github.com/containers/storage/pkg/ioutils/fmt.go new file mode 100644 index 00000000..0b04b0ba --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go new file mode 100644 index 00000000..6dc50a03 --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -0,0 +1,82 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/multireader.go b/vendor/github.com/containers/storage/pkg/ioutils/multireader.go new file mode 100644 index 00000000..0d2d76b4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/multireader.go @@ -0,0 +1,226 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, 
os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + var rdr io.ReadSeeker + var rdrOffset int64 + + for i, rdr := range r.readers { + offsetTo, err := r.getOffsetToReader(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo > offset { + rdr = r.readers[i-1] + rdrOffset = offsetTo - offset + break + } + + if rdr == r.readers[len(r.readers)-1] { + rdrOffset = offsetTo + offset + break + } + } + + return rdr, rdrOffset, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bCap := int64(cap(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bCap) + if err != nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. 
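+//
+// Usage sketch (editorial illustration, not part of the vendored source):
+//
+//	rs := ioutils.MultiReadSeeker(strings.NewReader("hello "), strings.NewReader("world"))
+//	rs.Seek(6, os.SEEK_SET) // lands at the start of the second reader
+//	io.Copy(os.Stdout, rs)  // prints "world"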
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go new file mode 100644 index 00000000..63f3c07f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. 
It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go new file mode 100644 index 00000000..1539ad21 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go new file mode 100644 index 00000000..c719c120 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/containers/storage/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go new file mode 100644 index 00000000..52a4901a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. 
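+//
+// For example (editorial illustration, not part of the vendored source):
+//
+//	wf := ioutils.NewWriteFlusher(w)
+//	wf.Write([]byte("streamed")) // written and flushed in one step
+//	wf.Close()
+//	_, err := wf.Write([]byte("late")) // returns errWriteFlusherClosed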
+func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go new file mode 100644 index 00000000..ccc7f9c2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. 
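+//
+// Usage sketch (editorial illustration, not part of the vendored source;
+// v stands for any encodable value):
+//
+//	wc := ioutils.NewWriteCounter(ioutil.Discard)
+//	json.NewEncoder(wc).Encode(v) // Encode does not report a byte count...
+//	fmt.Println(wc.Count)         // ...but the counter records it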
+func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go new file mode 100644 index 00000000..971f45eb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -0,0 +1,137 @@ +// +build linux + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
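+	// (Editorial note: /dev/loop-control and LOOP_CTL_GET_FREE are only
+	// available on Linux 3.1+, hence the fallback scan from index 0.)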
+	startIndex, err := getNextFreeLoopbackIndex()
+	if err != nil {
+		logrus.Debugf("Error retrieving the next available loopback: %s", err)
+	}
+
+	// OpenFile adds O_CLOEXEC
+	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+	if err != nil {
+		logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+		return nil, ErrAttachLoopbackDevice
+	}
+	defer sparseFile.Close()
+
+	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the status of the loopback device
+	loopInfo := &loopInfo64{
+		loFileName: stringToLoopName(loopFile.Name()),
+		loOffset:   0,
+		loFlags:    LoFlagsAutoClear,
+	}
+
+	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+		logrus.Errorf("Cannot set up loopback device info: %s", err)
+
+		// If the call failed, then free the loopback device
+		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+			logrus.Error("Error while cleaning up the loopback device")
+		}
+		loopFile.Close()
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
new file mode 100644
index 00000000..0714eb5f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
@@ -0,0 +1,53 @@
+// +build linux
+
+package loopback
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+	index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
+	if err != 0 {
+		return 0, err
+	}
+	return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
+	loopInfo := &loopInfo64{}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return nil, err
+	}
+	return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
new file mode 100644
index 00000000..e1100ce1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
@@ -0,0 +1,52 @@
+// +build linux
+
+package loopback
+
+/*
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+ +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go new file mode 100644 index 00000000..bc047928 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("Error loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. +func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/mflag/LICENSE b/vendor/github.com/containers/storage/pkg/mflag/LICENSE new file mode 100644 index 00000000..9b4f4a29 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mflag/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go new file mode 100644 index 00000000..607dbed4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. 
+	// For flag, the key is the flag value (the key for propagation flag is -1)
+	// For data=value, the key is the data
+	flagCollisions := map[int]bool{}
+	dataCollisions := map[string]bool{}
+
+	var newOptions []string
+	// We process in reverse order
+	for i := len(options) - 1; i >= 0; i-- {
+		option := options[i]
+		if option == "defaults" {
+			continue
+		}
+		if f, ok := flags[option]; ok && f.flag != 0 {
+			// There is only one propagation mode
+			key := f.flag
+			if propagationFlags[option] {
+				key = -1
+			}
+			// Check to see if there is collision for flag
+			if !flagCollisions[key] {
+				// We prepend the option and add to collision map
+				newOptions = append([]string{option}, newOptions...)
+				flagCollisions[key] = true
+			}
+			continue
+		}
+		opt := strings.SplitN(option, "=", 2)
+		if len(opt) != 2 || !validFlags[opt[0]] {
+			return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+		if !dataCollisions[opt[0]] {
+			// We prepend the option and add to collision map
+			newOptions = append([]string{option}, newOptions...)
+			dataCollisions[opt[0]] = true
+		}
+	}
+
+	return newOptions, nil
+}
+
+// parseOptions parses fstab-type mount options into mount() flags
+// and device-specific data
+func parseOptions(options string) (int, string) {
+	var (
+		flag int
+		data []string
+	)
+
+	for _, o := range strings.Split(options, ",") {
+		// If the option does not exist in the flags table or the flag
+		// is not supported on the platform,
+		// then it is a data value for a specific fs type
+		if f, exists := flags[o]; exists && f.flag != 0 {
+			if f.clear {
+				flag &= ^f.flag
+			} else {
+				flag |= f.flag
+			}
+		} else {
+			data = append(data, o)
+		}
+	}
+	return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parses fstab-type mount options into flags and data
+func ParseTmpfsOptions(options string) (int, string, error) {
+	flags, data := parseOptions(options)
+	for _, o := range strings.Split(data, ",") {
+		opt := strings.SplitN(o, "=", 2)
+		if !validFlags[opt[0]] {
+			return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+	}
+	return flags, data, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
new file mode 100644
index 00000000..f166cb2f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+	// RDONLY will mount the filesystem as read-only.
+	RDONLY = C.MNT_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = C.MNT_NOSUID
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = C.MNT_NOEXEC
+
+	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+	SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
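+// Their zero values make parseOptions fall through to its data branch, so on
+// FreeBSD these names are passed along as filesystem-specific data rather
+// than translated into mount flags.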
+const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go new file mode 100644 index 00000000..dc696dce --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -0,0 +1,85 @@ +package mount + +import ( + "syscall" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = syscall.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = syscall.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = syscall.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = syscall.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. 
+ STRICTATIME = syscall.MS_STRICTATIME +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go new file mode 100644 index 00000000..5564f7b3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -0,0 +1,30 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go new file mode 100644 index 00000000..66ac4bf4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -0,0 +1,74 @@ +package mount + +import ( + "time" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil +} + +// Unmount will unmount the target filesystem, so long as it is mounted. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// ForceUnmount will force an unmount of the target filesystem, regardless if +// it is mounted or not. 
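+//
+// Usage sketch for the package-level helpers (editorial illustration, not
+// part of the vendored source; the device, target and options are made up):
+//
+//	if err := mount.Mount("tmpfs", "/mnt/scratch", "tmpfs", "size=64m,noexec"); err != nil {
+//		return err
+//	}
+//	defer mount.Unmount("/mnt/scratch")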
+func ForceUnmount(target string) (err error) {
+	// Simple retry logic for unmount
+	for i := 0; i < 10; i++ {
+		if err = unmount(target, 0); err == nil {
+			return nil
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
new file mode 100644
index 00000000..bb870e6f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	xs := strings.Split(data, ",")
+	for _, x := range xs {
+		if x == "bind" {
+			isNullFS = true
+		}
+	}
+
+	options := []string{"fspath", target}
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption := range rawOptions {
+		defer C.free(rawOption.iov_base)
+	}
+
+	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+		reason := C.GoString(C.strerror(*C.__error()))
+		return fmt.Errorf("Failed to call nmount: %s", reason)
+	}
+	return nil
+}
+
+func unmount(target string, flag int) error {
+	return syscall.Unmount(target, flag)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
new file mode 100644
index 00000000..dd4280c7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
@@ -0,0 +1,21 @@
+package mount
+
+import (
+	"syscall"
+)
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	if err := syscall.Mount(device, target, mType, flag, data); err != nil {
+		return err
+	}
+
+	// If we have a bind mount or remount, remount...
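+	// (Editorial note: the kernel ignores MS_RDONLY on the initial MS_BIND
+	// call, so a read-only bind mount needs this second, explicit remount.)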
+	if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
+		return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
+	}
+	return nil
+}
+
+func unmount(target string, flag int) error {
+	return syscall.Unmount(target, flag)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go
new file mode 100644
index 00000000..c684aa81
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go
@@ -0,0 +1,33 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+	"unsafe"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+//     return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	spec := C.CString(device)
+	dir := C.CString(target)
+	fstype := C.CString(mType)
+	_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+	C.free(unsafe.Pointer(spec))
+	C.free(unsafe.Pointer(dir))
+	C.free(unsafe.Pointer(fstype))
+	return err
+}
+
+func unmount(target string, flag int) error {
+	err := unix.Unmount(target, flag)
+	return err
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
new file mode 100644
index 00000000..a2a3bb45
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+	panic("Not implemented")
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go
new file mode 100644
index 00000000..e3fc3535
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go
@@ -0,0 +1,40 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+	// ID is a unique identifier of the mount (may be reused after umount).
+	ID int
+
+	// Parent indicates the ID of the mount parent (or of self for the top of the
+	// mount tree).
+	Parent int
+
+	// Major indicates one half of the device ID which identifies the device class.
+	Major int
+
+	// Minor indicates one half of the device ID which identifies a specific
+	// instance of device.
+	Minor int
+
+	// Root of the mount within the filesystem.
+	Root string
+
+	// Mountpoint indicates the mount point relative to the process's root.
+	Mountpoint string
+
+	// Opts represents mount-specific options.
+	Opts string
+
+	// Optional represents optional fields.
+	Optional string
+
+	// Fstype indicates the type of filesystem, such as EXT3.
+	Fstype string
+
+	// Source indicates filesystem specific information or "none".
+	Source string
+
+	// VfsOpts represents per super block options.
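+	// In the sample mountinfo line documented in mountinfo_linux.go this
+	// field would hold "rw,errors=continue".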
+	VfsOpts string
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 00000000..4f32edcd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts.
+func parseMountTable() ([]*Info, error) {
+	var rawEntries *C.struct_statfs
+
+	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+	if count == 0 {
+		return nil, fmt.Errorf("Failed to call getmntinfo")
+	}
+
+	var entries []C.struct_statfs
+	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+	header.Cap = count
+	header.Len = count
+	header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+	var out []*Info
+	for _, entry := range entries {
+		var mountinfo Info
+		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+		out = append(out, &mountinfo)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
new file mode 100644
index 00000000..be69fee1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+const (
+	/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+	   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
+
+	   (1) mount ID: unique identifier of the mount (may be reused after umount)
+	   (2) parent ID: ID of parent (or of self for the top of the mount tree)
+	   (3) major:minor: value of st_dev for files on filesystem
+	   (4) root: root of the mount within the filesystem
+	   (5) mount point: mount point relative to the process's root
+	   (6) mount options: per mount options
+	   (7) optional fields: zero or more fields of the form "tag[:value]"
+	   (8) separator: marks the end of the optional fields
+	   (9) filesystem type: name of filesystem of the form "type[.subtype]"
+	   (10) mount source: filesystem specific information or "none"
+	   (11) super options: per super block options*/
+	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+	var (
+		s   = bufio.NewScanner(r)
+		out = []*Info{}
+	)
+
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+
+		var (
+			p              = &Info{}
+			text           = s.Text()
+			optionalFields string
+		)
+
+		if _, err := fmt.Sscanf(text, mountinfoFormat,
+			&p.ID, &p.Parent, &p.Major, &p.Minor,
+			&p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+			return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+		}
+		// Safe as mountinfo encodes mountpoints with spaces as \040.
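// (Illustrative aside, not part of the vendored diff.) For the sample line
// from the const block above:
//
//	36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
//
// Sscanf fills ID=36, Parent=35, Major=98, Minor=0, Root="/mnt1",
// Mountpoint="/mnt2", Opts="rw,noatime" and optionalFields="master:1";
// everything after the " - " separator is then split below into
// Fstype="ext3", Source="/dev/root" and VfsOpts="rw,errors=continue".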
+		index := strings.Index(text, " - ")
+		postSeparatorFields := strings.Fields(text[index+3:])
+		if len(postSeparatorFields) < 3 {
+			return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+		}
+
+		if optionalFields != "-" {
+			p.Optional = optionalFields
+		}
+
+		p.Fstype = postSeparatorFields[0]
+		p.Source = postSeparatorFields[1]
+		p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+		out = append(out, p)
+	}
+	return out, nil
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go
new file mode 100644
index 00000000..ad9ab57f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+	"fmt"
+)
+
+func parseMountTable() ([]*Info, error) {
+	mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+	if mnttab == nil {
+		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+	}
+
+	var out []*Info
+	var mp C.struct_mnttab
+
+	ret := C.getmntent(mnttab, &mp)
+	for ret == 0 {
+		var mountinfo Info
+		mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+		mountinfo.Source = C.GoString(mp.mnt_special)
+		mountinfo.Fstype = C.GoString(mp.mnt_fstype)
+		mountinfo.Opts = C.GoString(mp.mnt_mntopts)
+		out = append(out, &mountinfo)
+		ret = C.getmntent(mnttab, &mp)
+	}
+
+	C.fclose(mnttab)
+	return out, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 00000000..7fbcf192
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+	"fmt"
+	"runtime"
+)
+
+func parseMountTable() ([]*Info, error) {
+	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go
new file mode 100644
index 00000000..dab8a37e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+	// Do NOT return an error!
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 00000000..8ceec84b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+	mounted, err := Mounted(mountPoint)
+	if err != nil {
+		return err
+	}
+
+	if !mounted {
+		if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+			return err
+		}
+	}
+	if _, err = Mounted(mountPoint); err != nil {
+		return err
+	}
+
+	return ForceMount("", mountPoint, "none", options)
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
new file mode 100644
index 00000000..7738fc74
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+// Package kernel provides helper function to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+	"errors"
+	"fmt"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+	Kernel int    // Version of the kernel (e.g. 4.1.2-generic -> 4)
+	Major  int    // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
+	Minor  int    // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
+	Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
+}
+
+func (k *VersionInfo) String() string {
+	return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
+}
+
+// CompareKernelVersion compares two kernel.VersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b
+func CompareKernelVersion(a, b VersionInfo) int {
+	if a.Kernel < b.Kernel {
+		return -1
+	} else if a.Kernel > b.Kernel {
+		return 1
+	}
+
+	if a.Major < b.Major {
+		return -1
+	} else if a.Major > b.Major {
+		return 1
+	}
+
+	if a.Minor < b.Minor {
+		return -1
+	} else if a.Minor > b.Minor {
+		return 1
+	}
+
+	return 0
+}
+
+// ParseRelease parses a string and creates a VersionInfo based on it.
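// (Illustrative aside, not part of the vendored diff.) Given the two-step
// Sscanf below, "4.1.2-generic" yields {Kernel: 4, Major: 1, Minor: 2,
// Flavor: "-generic"}, while "3.12-1-amd64" fails the second scan and
// yields {Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}.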
+func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 00000000..71f205b2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 00000000..54a89d28 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,30 @@ +// +build linux freebsd solaris + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" +) + +// GetKernelVersion gets the current kernel version. 
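// (Illustrative aside, not part of the vendored diff.) A typical consumer
// pairs this with CompareKernelVersion to gate features on a minimum
// kernel version, e.g.:
//
//	v, err := kernel.GetKernelVersion()
//	if err == nil && kernel.CompareKernelVersion(*v,
//		kernel.VersionInfo{Kernel: 3, Major: 10, Minor: 0}) >= 0 {
//		// safe to rely on >= 3.10 behavior
//	}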
+func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 00000000..80fab8ff --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..bb9b3264 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthrough for syscall.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 00000000..49370bd3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..1da3f239 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go new file mode 100644 index 00000000..acc89716 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. 
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 00000000..a15e3688 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. 
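// (Illustrative aside, not part of the vendored diff.) Outside of the Copy
// helper above, the pools are used manually in get/put pairs; a hypothetical
// caller looks like:
//
//	buf := BufioReader32KPool.Get(src) // borrow a 32K-buffered reader
//	defer BufioReader32KPool.Put(buf)  // reset it and return it to the pool
//	n, err := io.Copy(dst, buf)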
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+	return ioutils.NewReadCloserWrapper(r, func() error {
+		if readCloser, ok := r.(io.ReadCloser); ok {
+			readCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+	pool *sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+	pool := &sync.Pool{
+		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+	}
+	return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+	buf := bufPool.pool.Get().(*bufio.Writer)
+	buf.Reset(w)
+	return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go
new file mode 100644
index 00000000..dd52b908
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it wraps a call to a function in a goroutine,
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/vendor/github.com/containers/storage/pkg/random/random.go b/vendor/github.com/containers/storage/pkg/random/random.go
new file mode 100644
index 00000000..70de4d13
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/random/random.go
@@ -0,0 +1,71 @@
+package random
+
+import (
+	cryptorand "crypto/rand"
+	"io"
+	"math"
+	"math/big"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// Rand is a global *rand.Rand instance, which initialized with NewSource() source.
+var Rand = rand.New(NewSource())
+
+// Reader is a global, shared instance of a pseudorandom bytes generator.
+// It doesn't consume entropy.
+var Reader io.Reader = &reader{rnd: Rand}
+
+// copypaste from standard math/rand
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// NewSource returns math/rand.Source safe for concurrent use and initialized
+// with current unix-nano timestamp
+func NewSource() rand.Source {
+	var seed int64
+	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+		// This should not happen, but worst-case fallback to time-based seed.
+ seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go new file mode 100644 index 00000000..3c3a73a9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which have Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go new file mode 100644 index 00000000..b70edcb3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd solaris + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which have Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go new file mode 100644 index 00000000..9aed004e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd,!solaris + +package reexec + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux and Windows. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go new file mode 100644 index 00000000..8d65e0ae --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which have Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". 
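// (Illustrative aside, not part of the vendored diff.) On every platform the
// reexec package is used the same way: register an initializer under a name,
// then re-execute the current binary with that name as argv[0] so Init()
// runs it in the child. A hypothetical caller:
//
//	func init() { reexec.Register("storage-helper", helperMain) }
//
//	func main() {
//		if reexec.Init() {
//			return // we were re-executed as "storage-helper"
//		}
//		cmd := reexec.Command("storage-helper", "--flag")
//		_ = cmd.Run()
//	}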
+func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go new file mode 100644 index 00000000..c56671d9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go new file mode 100644 index 00000000..74dfaaaa --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -0,0 +1,71 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + "crypto/rand" + "encoding/hex" + "io" + "regexp" + "strconv" + "strings" + + "github.com/containers/storage/pkg/random" +) + +const shortLen = 12 + +var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + trimTo := shortLen + if len(id) < shortLen { + trimTo = len(id) + } + return id[:trimTo] +} + +func generateID(crypto bool) string { + b := make([]byte, 32) + r := random.Reader + if crypto { + r = rand.Reader + } + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(true) + +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. 
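// (Illustrative aside, not part of the vendored diff.) The generated IDs are
// 64 hex characters (32 random bytes); TruncateID shortens them to the
// familiar 12-character form:
//
//	id := stringid.GenerateNonCryptoID() // 64 hex chars, never all-numeric
//	short := stringid.TruncateID(id)     // first 12 characters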
+func GenerateNonCryptoID() string { + return generateID(false) +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go new file mode 100644 index 00000000..7637f12e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -0,0 +1,52 @@ +package system + +import ( + "os" + "syscall" + "time" + "unsafe" +) + +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go new file mode 100644 index 00000000..09d58bcb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go new file mode 100644 index 00000000..29458684 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package system + +import ( + "syscall" + "time" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) + pathp, e := syscall.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer syscall.Close(h) + c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) + return syscall.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go new file mode 100644 index 00000000..28831898 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. 
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/containers/storage/pkg/system/events_windows.go b/vendor/github.com/containers/storage/pkg/system/events_windows.go new file mode 100644 index 00000000..04e2de78 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go new file mode 100644 index 00000000..c14feb84 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/filesys.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "os" + "path/filepath" +) + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. 
+func IsAbs(path string) bool { + return filepath.IsAbs(path) +} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go new file mode 100644 index 00000000..16823d55 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go @@ -0,0 +1,82 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" +) + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, perm os.FileMode) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat.go b/vendor/github.com/containers/storage/pkg/system/lstat.go new file mode 100644 index 00000000..bd23c4d5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go new file mode 100644 index 00000000..49e87eb4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package system + +import ( + "os" +) + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. 
+// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go new file mode 100644 index 00000000..3b6e947e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go new file mode 100644 index 00000000..385f1d5e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
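// (Illustrative aside, not part of the vendored diff.) For input such as:
//
//	MemTotal:       16344972 kB
//	SwapFree:        4128764 kB
//	HugePages_Total:       0
//
// the loop above stores MemTotal = 16344972 * 1024 and
// SwapFree = 4128764 * 1024 (bytes), and skips the HugePages line
// because it has no "kB" unit field.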
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
new file mode 100644
index 00000000..313c601b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -0,0 +1,128 @@
+// +build solaris,cgo
+
+package system
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+//	struct swaptable *st;
+//	struct swapent *swapent;
+//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+//	swapent = st->swt_ent;
+//	for (int i = 0; i < num; i++,swapent++) {
+//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+//	}
+//	st->swt_n = num;
+//	return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+//	struct swapent *swapent = st->swt_ent;
+//	for (int i = 0; i < st->swt_n; i++,swapent++) {
+//		free(swapent->ste_path);
+//	}
+//	free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+//	return ent[i];
+// }
+// int64_t getPpKernel() {
+//	int64_t pp_kernel = 0;
+//	kstat_ctl_t *ksc;
+//	kstat_t *ks;
+//	kstat_named_t *knp;
+//	kid_t kid;
+//
+//	if ((ksc = kstat_open()) == NULL) {
+//		return -1;
+//	}
+//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+//		return -1;
+//	}
+//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+//		return -1;
+//	}
+//	switch (knp->data_type) {
+//	case KSTAT_DATA_UINT64:
+//		pp_kernel = knp->value.ui64;
+//		break;
+//	case KSTAT_DATA_UINT32:
+//		pp_kernel = knp->value.ui32;
+//		break;
+//	}
+//	pp_kernel *= sysconf(_SC_PAGESIZE);
+//	return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// Get the system memory info using sysconf same as prtconf
+func getTotalMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_PHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_AVPHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("Error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000..3ce019df --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go new file mode 100644 index 00000000..d4664259 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go @@ -0,0 +1,44 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
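// (Illustrative aside, not part of the vendored diff.) The dwLength value of
// 64 hard-coded below is the size of the MEMORYSTATUSEX structure:
// 2*sizeof(uint32) + 7*sizeof(uint64) = 8 + 56 = 64 bytes. Win32 requires
// callers to record the struct size in dwLength before the call, and
// GlobalMemoryStatusEx fails (r1 == 0) if it is wrong.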
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go new file mode 100644 index 00000000..73958182 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go new file mode 100644 index 00000000..2e863c02 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go new file mode 100644 index 00000000..c607c4db --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go new file mode 100644 index 00000000..cbfe2c15 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. +const DefaultPathEnv = "" + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. 
+// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be contatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat.go b/vendor/github.com/containers/storage/pkg/system/stat.go new file mode 100644 index 00000000..087034c5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat.go @@ -0,0 +1,53 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// GetLastModification returns file's last modification time. +func (s StatT) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go new file mode 100644 index 00000000..d0fb6f15 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -0,0 +1,27 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
new file mode 100644
index 00000000..8b1eded1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: s.Mode,
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: s.Rdev,
+		mtim: s.Mtim}, nil
+}
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
new file mode 100644
index 00000000..3c3b71fb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
@@ -0,0 +1,15 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
new file mode 100644
index 00000000..0216985a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris
+
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtim}, nil
+}
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unsupported.go b/vendor/github.com/containers/storage/pkg/system/stat_unsupported.go
new file mode 100644
index 00000000..f53e9de4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd,!solaris,!openbsd
+
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
new file mode 100644
index 00000000..39490c62
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like name, permission, size, etc about a file.
+type StatT struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+	isDir   bool
+}
+
+// Name returns file's name.
+func (s StatT) Name() string {
+	return s.name
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+	return s.mode
+}
+
+// ModTime returns file's last modification time.
+func (s StatT) ModTime() time.Time {
+	return s.modTime
+}
+
+// IsDir returns whether file is actually a directory.
+func (s StatT) IsDir() bool {
+	return s.isDir
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
new file mode 100644
index 00000000..3ae91284
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "syscall"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+	return syscall.Unmount(dest, 0)
+}
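
The stat_*.go files above all funnel into the same portable StatT accessors, so callers never touch syscall.Stat_t directly; build tags pick the right fromStatT per platform. A minimal, illustrative sketch of the intended call pattern on a Unix platform (the probed path is arbitrary; the import path is the one this diff vendors):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	// system.Stat dispatches to the platform's fromStatT via build tags.
	st, err := system.Stat("/etc/hosts")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	// Only the exported accessors are available; the raw fields stay private.
	fmt.Printf("mode=%o uid=%d gid=%d size=%d\n", st.Mode(), st.UID(), st.GID(), st.Size())
}
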
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element in the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+	return []string{commandLine}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
new file mode 100644
index 00000000..f5f2d569
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go
@@ -0,0 +1,103 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+)
+
+var (
+	ntuserApiset      = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+	procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+	Version      uint32
+	MajorVersion uint8
+	MinorVersion uint8
+	Build        uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+	OSVersionInfoSize uint32
+	MajorVersion      uint32
+	MinorVersion      uint32
+	BuildNumber       uint32
+	PlatformID        uint32
+	CSDVersion        [128]uint16
+	ServicePackMajor  uint16
+	ServicePackMinor  uint16
+	SuiteMask         uint16
+	ProductType       byte
+	Reserve           byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+	var err error
+	osv := OSVersion{}
+	osv.Version, err = syscall.GetVersion()
+	if err != nil {
+		// GetVersion never fails.
+		panic(err)
+	}
+	osv.MajorVersion = uint8(osv.Version & 0xFF)
+	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+	osv.Build = uint16(osv.Version >> 16)
+	return osv
+}
+
+// IsWindowsClient returns true if the SKU is client
+func IsWindowsClient() bool {
+	osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+	r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+	if r1 == 0 {
+		logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+		return false
+	}
+	const verNTWorkstation = 0x00000001
+	return osviex.ProductType == verNTWorkstation
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. Not supported on Windows
+func Unmount(dest string) error {
+	return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+	var argc int32
+
+	argsPtr, err := syscall.UTF16PtrFromString(commandLine)
+	if err != nil {
+		return nil, err
+	}
+
+	argv, err := syscall.CommandLineToArgv(argsPtr, &argc)
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
+
+	newArgs := make([]string, argc)
+	for i, v := range (*argv)[:argc] {
+		newArgs[i] = string(syscall.UTF16ToString((*v)[:]))
+	}
+
+	return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+	// For now, check for ntuser API support on the host. In the future, a host
+	// may support win32k in containers even if the host does not support ntuser
+	// APIs.
+	return ntuserApiset.Load() == nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go
new file mode 100644
index 00000000..3d0146b0
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+	return syscall.Umask(newmask), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
new file mode 100644
index 00000000..13f1de17
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+	// should not be called on cli code path
+	return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go b/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go
new file mode 100644
index 00000000..0a161975
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go
@@ -0,0 +1,8 @@
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported by the darwin platform.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
new file mode 100644
index 00000000..e2eac3b5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	var _path *byte
+	_path, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+		return err
+	}
+
+	return nil
+}
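
Both the FreeBSD and Linux implementations of LUtimesNano expect a two-element []syscall.Timespec holding the access time followed by the modification time, mirroring the utimes(2) convention. A hedged sketch of setting a symlink's own timestamps (the path here is hypothetical; on Darwin and other unsupported platforms the call returns ErrNotSupportedPlatform):

package main

import (
	"fmt"
	"syscall"
	"time"

	"github.com/containers/storage/pkg/system"
)

func main() {
	now := time.Now().UnixNano()
	ts := []syscall.Timespec{
		syscall.NsecToTimespec(now), // access time
		syscall.NsecToTimespec(now), // modification time
	}
	// The link itself is updated (NOFOLLOW semantics); its target is untouched.
	if err := system.LUtimesNano("/tmp/example-symlink", ts); err != nil {
		fmt.Println("lutimes failed:", err)
	}
}
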
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
new file mode 100644
index 00000000..fc8a1aba
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	// These are not currently available in syscall
+	atFdCwd := -100
+	atSymLinkNoFollow := 0x100
+
+	var _path *byte
+	_path, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
new file mode 100644
index 00000000..50c3a043
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
new file mode 100644
index 00000000..d2e2c057
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It will return a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return nil, err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return nil, err
+	}
+
+	dest := make([]byte, 128)
+	destBytes := unsafe.Pointer(&dest[0])
+	sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	if errno == syscall.ENODATA {
+		return nil, nil
+	}
+	if errno == syscall.ERANGE {
+		dest = make([]byte, sz)
+		destBytes := unsafe.Pointer(&dest[0])
+		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	}
+	if errno != 0 {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
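+// A typical round trip through these wrappers, sketched (the path and the
+// user.* attribute name are illustrative only):
+//
+//	if err := Lsetxattr("/tmp/f", "user.note", []byte("hi"), 0); err == nil {
+//		val, _ := Lgetxattr("/tmp/f", "user.note") // val == []byte("hi")
+//	}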
+func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000..0114f222 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/storage/containers.go b/vendor/github.com/containers/storage/storage/containers.go new file mode 100644 index 00000000..5398f7a7 --- /dev/null +++ b/vendor/github.com/containers/storage/storage/containers.go @@ -0,0 +1,478 @@ +package storage + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID + ErrContainerUnknown = errors.New("container not known") +) + +// A Container is a reference to a read-write layer with metadata. +type Container struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // container can be referred to by its ID or any of its names. Names + // are unique among containers. + Names []string `json:"names,omitempty"` + + // ImageID is the ID of the image which was used to create the container. + ImageID string `json:"image"` + + // LayerID is the ID of the read-write layer for the container itself. + // It is assumed that the image's top layer is the parent of the container's + // read-write layer. + LayerID string `json:"layer"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ContainerStore provides bookkeeping for information about Containers. 
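+// A typical sequence, sketched against the interface below (the names and IDs
+// are illustrative): Create("", []string{"web"}, imageID, layerID, "") records
+// a new container and saves containers.json; Lookup("web") resolves the name
+// back to the generated ID; Delete(id) drops the record along with the
+// container's big-data directory.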
+type ContainerStore interface { + FileBasedStore + MetadataStore + BigDataStore + FlaggableStore + + // Create creates a container that has a specified ID (or generates a + // random one if an empty value is supplied) and optional names, + // based on the specified image, using the specified layer as its + // read-write layer. + Create(id string, names []string, image, layer, metadata string) (*Container, error) + + // SetNames updates the list of names associated with the container + // with the specified ID. + SetNames(id string, names []string) error + + // Get retrieves information about a container given an ID or name. + Get(id string) (*Container, error) + + // Exists checks if there is a container with the given ID or name. + Exists(id string) bool + + // Delete removes the record of the container. + Delete(id string) error + + // Wipe removes records of all containers. + Wipe() error + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Containers returns a slice enumerating the known containers. + Containers() ([]Container, error) +} + +type containerStore struct { + lockfile Locker + dir string + containers []Container + byid map[string]*Container + bylayer map[string]*Container + byname map[string]*Container +} + +func (r *containerStore) Containers() ([]Container, error) { + return r.containers, nil +} + +func (r *containerStore) containerspath() string { + return filepath.Join(r.dir, "containers.json") +} + +func (r *containerStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *containerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *containerStore) Load() error { + needSave := false + rpath := r.containerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + containers := []Container{} + layers := make(map[string]*Container) + ids := make(map[string]*Container) + names := make(map[string]*Container) + if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { + for n, container := range containers { + ids[container.ID] = &containers[n] + layers[container.LayerID] = &containers[n] + for _, name := range container.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = &containers[n] + } + } + } + r.containers = containers + r.byid = ids + r.bylayer = layers + r.byname = names + if needSave { + r.Touch() + return r.Save() + } + return nil +} + +func (r *containerStore) Save() error { + rpath := r.containerspath() + jdata, err := json.Marshal(&r.containers) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newContainerStore(dir string) (ContainerStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + cstore := containerStore{ + lockfile: lockfile, + dir: dir, + containers: []Container{}, + byid: make(map[string]*Container), + bylayer: make(map[string]*Container), + byname: make(map[string]*Container), + } + if err := cstore.Load(); err != nil { + return nil, err + } + return &cstore, nil +} + +func (r *containerStore) ClearFlag(id string, flag string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if 
container, ok := r.bylayer[id]; ok { + id = container.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + container := r.byid[id] + delete(container.Flags, flag) + return r.Save() +} + +func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + container := r.byid[id] + container.Flags[flag] = value + return r.Save() +} + +func (r *containerStore) Create(id string, names []string, image, layer, metadata string) (container *Container, err error) { + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, ErrDuplicateName + } + } + if err == nil { + newContainer := Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + Flags: make(map[string]interface{}), + } + r.containers = append(r.containers, newContainer) + container = &r.containers[len(r.containers)-1] + r.byid[id] = container + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container + } + err = r.Save() + } + return container, err +} + +func (r *containerStore) GetMetadata(id string) (string, error) { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if container, ok := r.byid[id]; ok { + return container.Metadata, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) SetMetadata(id, metadata string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if container, ok := r.byid[id]; ok { + container.Metadata = metadata + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) removeName(container *Container, name string) { + newNames := []string{} + for _, oldName := range container.Names { + if oldName != name { + newNames = append(newNames, oldName) + } + } + container.Names = newNames +} + +func (r *containerStore) SetNames(id string, names []string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if container, ok := r.byid[id]; ok { + for _, name := range container.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) + } + r.byname[name] = container + } + container.Names = names + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) Delete(id string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + if container, ok := r.byid[id]; ok { + newContainers := []Container{} + for _, candidate := range r.containers { + if candidate.ID != id { + newContainers = append(newContainers, candidate) + } + } + delete(r.byid, container.ID) + delete(r.bylayer, container.LayerID) + for _, name := range 
container.Names { + delete(r.byname, name) + } + r.containers = newContainers + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + } + return nil +} + +func (r *containerStore) Get(id string) (*Container, error) { + if c, ok := r.byname[id]; ok { + return c, nil + } else if c, ok := r.bylayer[id]; ok { + return c, nil + } + if c, ok := r.byid[id]; ok { + return c, nil + } + return nil, ErrContainerUnknown +} + +func (r *containerStore) Lookup(name string) (id string, err error) { + container, ok := r.byname[name] + if !ok { + container, ok = r.byid[name] + if !ok { + return "", ErrContainerUnknown + } + } + return container.ID, nil +} + +func (r *containerStore) Exists(id string) bool { + if _, ok := r.byname[id]; ok { + return true + } + if _, ok := r.bylayer[id]; ok { + return true + } + if _, ok := r.byid[id]; ok { + return true + } + return false +} + +func (r *containerStore) GetBigData(id, key string) ([]byte, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrContainerUnknown + } + return ioutil.ReadFile(r.datapath(id, key)) +} + +func (r *containerStore) GetBigDataSize(id, key string) (int64, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return -1, ErrContainerUnknown + } + if size, ok := r.byid[id].BigDataSizes[key]; ok { + return size, nil + } + return -1, ErrSizeUnknown +} + +func (r *containerStore) GetBigDataNames(id string) ([]string, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrContainerUnknown + } + return r.byid[id].BigDataNames, nil +} + +func (r *containerStore) SetBigData(id, key string, data []byte) error { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + if err := os.MkdirAll(r.datadir(id), 0700); err != nil { + return err + } + err := ioutils.AtomicWriteFile(r.datapath(id, key), data, 0600) + if err == nil { + save := false + oldSize, ok := r.byid[id].BigDataSizes[key] + r.byid[id].BigDataSizes[key] = int64(len(data)) + if !ok || oldSize != r.byid[id].BigDataSizes[key] { + save = true + } + add := true + for _, name := range r.byid[id].BigDataNames { + if name == key { + add = false + break + } + } + if add { + r.byid[id].BigDataNames = append(r.byid[id].BigDataNames, key) + save = true + } + if save { + err = r.Save() + } + } + return err +} + +func (r *containerStore) Wipe() error { + ids := []string{} + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *containerStore) Lock() { + r.lockfile.Lock() +} + +func (r *containerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *containerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *containerStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *containerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} diff --git a/vendor/github.com/containers/storage/storage/images.go b/vendor/github.com/containers/storage/storage/images.go new file mode 100644 index 00000000..6798c573 --- /dev/null +++ b/vendor/github.com/containers/storage/storage/images.go @@ -0,0 +1,448 @@ +package storage + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "path/filepath" + "time" + + 
"github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" +) + +var ( + // ErrImageUnknown indicates that there was no image with the specified name or ID + ErrImageUnknown = errors.New("image not known") +) + +// An Image is a reference to a layer and an associated metadata string. +type Image struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // image can be referred to by its ID or any of its names. Names are + // unique among images. + Names []string `json:"names,omitempty"` + + // TopLayer is the ID of the topmost layer of the image itself. + // Multiple images can refer to the same top layer. + TopLayer string `json:"layer"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ImageStore provides bookkeeping for information about Images. +type ImageStore interface { + FileBasedStore + MetadataStore + BigDataStore + FlaggableStore + + // Create creates an image that has a specified ID (or a random one) and + // optional names, using the specified layer as its topmost (hopefully + // read-only) layer. That layer can be referenced by multiple images. + Create(id string, names []string, layer, metadata string) (*Image, error) + + // SetNames replaces the list of names associated with an image with the + // supplied values. + SetNames(id string, names []string) error + + // Exists checks if there is an image with the given ID or name. + Exists(id string) bool + + // Get retrieves information about an image given an ID or name. + Get(id string) (*Image, error) + + // Delete removes the record of the image. + Delete(id string) error + + // Wipe removes records of all images. + Wipe() error + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Images returns a slice enumerating the known images. 
+ Images() ([]Image, error) +} + +type imageStore struct { + lockfile Locker + dir string + images []Image + byid map[string]*Image + byname map[string]*Image +} + +func (r *imageStore) Images() ([]Image, error) { + return r.images, nil +} + +func (r *imageStore) imagespath() string { + return filepath.Join(r.dir, "images.json") +} + +func (r *imageStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *imageStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *imageStore) Load() error { + needSave := false + rpath := r.imagespath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + images := []Image{} + ids := make(map[string]*Image) + names := make(map[string]*Image) + if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { + for n, image := range images { + ids[image.ID] = &images[n] + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = &images[n] + } + } + } + r.images = images + r.byid = ids + r.byname = names + if needSave { + r.Touch() + return r.Save() + } + return nil +} + +func (r *imageStore) Save() error { + rpath := r.imagespath() + jdata, err := json.Marshal(&r.images) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newImageStore(dir string) (ImageStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func (r *imageStore) ClearFlag(id string, flag string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + image := r.byid[id] + delete(image.Flags, flag) + return r.Save() +} + +func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + image := r.byid[id] + image.Flags[flag] = value + return r.Save() +} + +func (r *imageStore) Create(id string, names []string, layer, metadata string) (image *Image, err error) { + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, ErrDuplicateName + } + } + if err == nil { + newImage := Image{ + ID: id, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + Flags: make(map[string]interface{}), + } + r.images = append(r.images, newImage) + image = &r.images[len(r.images)-1] + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + err = r.Save() + } + return image, err +} + +func (r *imageStore) GetMetadata(id string) (string, error) { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if image, ok := r.byid[id]; ok { + return image.Metadata, nil + } 
+ return "", ErrImageUnknown +} + +func (r *imageStore) SetMetadata(id, metadata string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if image, ok := r.byid[id]; ok { + image.Metadata = metadata + return r.Save() + } + return ErrImageUnknown +} + +func (r *imageStore) removeName(image *Image, name string) { + newNames := []string{} + for _, oldName := range image.Names { + if oldName != name { + newNames = append(newNames, oldName) + } + } + image.Names = newNames +} + +func (r *imageStore) SetNames(id string, names []string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if image, ok := r.byid[id]; ok { + for _, name := range image.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) + } + r.byname[name] = image + } + image.Names = names + return r.Save() + } + return ErrImageUnknown +} + +func (r *imageStore) Delete(id string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + if image, ok := r.byid[id]; ok { + newImages := []Image{} + for _, candidate := range r.images { + if candidate.ID != id { + newImages = append(newImages, candidate) + } + } + delete(r.byid, image.ID) + for _, name := range image.Names { + delete(r.byname, name) + } + r.images = newImages + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + } + return nil +} + +func (r *imageStore) Get(id string) (*Image, error) { + if image, ok := r.byname[id]; ok { + return image, nil + } + if image, ok := r.byid[id]; ok { + return image, nil + } + return nil, ErrImageUnknown +} + +func (r *imageStore) Lookup(name string) (id string, err error) { + image, ok := r.byname[name] + if !ok { + image, ok = r.byid[name] + if !ok { + return "", ErrImageUnknown + } + } + return image.ID, nil +} + +func (r *imageStore) Exists(id string) bool { + if _, ok := r.byname[id]; ok { + return true + } + if _, ok := r.byid[id]; ok { + return true + } + return false +} + +func (r *imageStore) GetBigData(id, key string) ([]byte, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrImageUnknown + } + return ioutil.ReadFile(r.datapath(id, key)) +} + +func (r *imageStore) GetBigDataSize(id, key string) (int64, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return -1, ErrImageUnknown + } + if size, ok := r.byid[id].BigDataSizes[key]; ok { + return size, nil + } + return -1, ErrSizeUnknown +} + +func (r *imageStore) GetBigDataNames(id string) ([]string, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrImageUnknown + } + return r.byid[id].BigDataNames, nil +} + +func (r *imageStore) SetBigData(id, key string, data []byte) error { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + if err := os.MkdirAll(r.datadir(id), 0700); err != nil { + return err + } + err := ioutils.AtomicWriteFile(r.datapath(id, key), data, 0600) + if err == nil { + add := true + save := false + oldSize, ok := r.byid[id].BigDataSizes[key] + r.byid[id].BigDataSizes[key] = int64(len(data)) + if !ok || oldSize != r.byid[id].BigDataSizes[key] { + save = true + } + for _, name := range r.byid[id].BigDataNames { + if name == key { + add = false + break + } + } + if add { + 
r.byid[id].BigDataNames = append(r.byid[id].BigDataNames, key)
+			save = true
+		}
+		if save {
+			err = r.Save()
+		}
+	}
+	return err
+}
+
+func (r *imageStore) Wipe() error {
+	ids := []string{}
+	for id := range r.byid {
+		ids = append(ids, id)
+	}
+	for _, id := range ids {
+		if err := r.Delete(id); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *imageStore) Lock() {
+	r.lockfile.Lock()
+}
+
+func (r *imageStore) Unlock() {
+	r.lockfile.Unlock()
+}
+
+func (r *imageStore) Touch() error {
+	return r.lockfile.Touch()
+}
+
+func (r *imageStore) Modified() (bool, error) {
+	return r.lockfile.Modified()
+}
+
+func (r *imageStore) TouchedSince(when time.Time) bool {
+	return r.lockfile.TouchedSince(when)
+}
diff --git a/vendor/github.com/containers/storage/storage/layers.go b/vendor/github.com/containers/storage/storage/layers.go
new file mode 100644
index 00000000..924b99ad
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage/layers.go
@@ -0,0 +1,881 @@
+package storage
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+
+	drivers "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/stringid"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+const (
+	tarSplitSuffix  = ".tar-split.gz"
+	incompleteFlag  = "incomplete"
+	compressionFlag = "diff-compression"
+)
+
+var (
+	// ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer
+	ErrParentUnknown = errors.New("parent of layer not known")
+	// ErrLayerUnknown indicates that there was no layer with the specified name or ID
+	ErrLayerUnknown = errors.New("layer not known")
+)
+
+// A Layer is a record of a copy-on-write layer that's stored by the lower
+// level graph driver.
+type Layer struct {
+	// ID is either one which was specified at create-time, or a random
+	// value which was generated by the library.
+	ID string `json:"id"`
+
+	// Names is an optional set of user-defined convenience values. The
+	// layer can be referred to by its ID or any of its names. Names are
+	// unique among layers.
+	Names []string `json:"names,omitempty"`
+
+	// Parent is the ID of a layer from which this layer inherits data.
+	Parent string `json:"parent,omitempty"`
+
+	// Metadata is data we keep for the convenience of the caller. It is not
+	// expected to be large, since it is kept in memory.
+	Metadata string `json:"metadata,omitempty"`
+
+	// MountLabel is an SELinux label which should be used when attempting to mount
+	// the layer.
+	MountLabel string `json:"mountlabel,omitempty"`
+
+	// MountPoint is the path where the layer is mounted, or where it was most
+	// recently mounted. This can change between subsequent Unmount() and
+	// Mount() calls, so the caller should consult this value after Mount()
+	// succeeds to find the location of the container's root filesystem.
+	MountPoint string `json:"-"`
+
+	// MountCount is used as a reference count for the container's layer being
+	// mounted at the mount point.
+	MountCount int `json:"-"`
+
+	Flags map[string]interface{} `json:"flags,omitempty"`
+}
+
+type layerMountPoint struct {
+	ID         string `json:"id"`
+	MountPoint string `json:"path"`
+	MountCount int    `json:"count"`
+}
+
+// LayerStore wraps a graph driver, adding the ability to refer to layers by
+// name, and keeping track of parent-child relationships, along with a list of
+// all known layers.
+type LayerStore interface {
+	FileBasedStore
+	MetadataStore
+	FlaggableStore
+
+	// Create creates a new layer, optionally giving it a specified ID rather than
+	// a randomly-generated one, either inheriting data from another specified
+	// layer or the empty base layer. The new layer can optionally be given names
+	// and have an SELinux label specified for use when mounting it. Some
+	// underlying drivers can accept a "size" option. At this time, most
+	// underlying drivers do not themselves distinguish between writeable
+	// and read-only layers.
+	Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (*Layer, error)
+
+	// CreateWithFlags combines the functions of Create and SetFlag.
+	CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
+
+	// Put combines the functions of CreateWithFlags and ApplyDiff.
+	Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error)
+
+	// Exists checks if a layer with the specified name or ID is known.
+	Exists(id string) bool
+
+	// Get retrieves information about a layer given an ID or name.
+	Get(id string) (*Layer, error)
+
+	// SetNames replaces the list of names associated with a layer with the
+	// supplied values.
+	SetNames(id string, names []string) error
+
+	// Status returns a slice of key-value pairs, suitable for human consumption,
+	// relaying whatever status information the underlying driver can share.
+	Status() ([][2]string, error)
+
+	// Delete deletes a layer with the specified name or ID.
+	Delete(id string) error
+
+	// Wipe deletes all layers.
+	Wipe() error
+
+	// Mount mounts a layer for use. If the specified layer is the parent of other
+	// layers, it should not be written to. An SELinux label to be applied to the
+	// mount can be specified to override the one configured for the layer.
+	Mount(id, mountLabel string) (string, error)
+
+	// Unmount unmounts a layer when it is no longer in use.
+	Unmount(id string) error
+
+	// Changes returns a slice of Change structures, which contain a pathname
+	// (Path) and a description of what sort of change (Kind) was made by the
+	// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
+	// specified layer. By default, the layer's parent is used as a reference.
+	Changes(from, to string) ([]archive.Change, error)
+
+	// Diff produces a tarstream which can be applied to a layer with the contents
+	// of the first layer to produce a layer with the contents of the second layer.
+	// By default, the parent of the second layer is used as the first
+	// layer, so it need not be specified.
+	Diff(from, to string) (io.ReadCloser, error)
+
+	// DiffSize produces an estimate of the length of the tarstream which would be
+	// produced by Diff.
+	DiffSize(from, to string) (int64, error)
+
+	// ApplyDiff reads a tarstream which was created by a previous call to Diff and
+	// applies its changes to a specified layer.
+	ApplyDiff(to string, diff archive.Reader) (int64, error)
+
+	// Lookup attempts to translate a name to an ID. Most methods do this
+	// implicitly.
+	Lookup(name string) (string, error)
+
+	// Layers returns a slice of the known layers.
+	Layers() ([]Layer, error)
+}
+
+type layerStore struct {
+	lockfile Locker
+	rundir   string
+	driver   drivers.Driver
+	layerdir string
+	layers   []Layer
+	byid     map[string]*Layer
+	byname   map[string]*Layer
+	byparent map[string][]*Layer
+	bymount  map[string]*Layer
+}
+
+func (r *layerStore) Layers() ([]Layer, error) {
+	return r.layers, nil
+}
+
+func (r *layerStore) mountspath() string {
+	return filepath.Join(r.rundir, "mountpoints.json")
+}
+
+func (r *layerStore) layerspath() string {
+	return filepath.Join(r.layerdir, "layers.json")
+}
+
+func (r *layerStore) Load() error {
+	needSave := false
+	rpath := r.layerspath()
+	data, err := ioutil.ReadFile(rpath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	layers := []Layer{}
+	ids := make(map[string]*Layer)
+	names := make(map[string]*Layer)
+	mounts := make(map[string]*Layer)
+	parents := make(map[string][]*Layer)
+	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
+		for n, layer := range layers {
+			ids[layer.ID] = &layers[n]
+			for _, name := range layer.Names {
+				if conflict, ok := names[name]; ok {
+					r.removeName(conflict, name)
+					needSave = true
+				}
+				names[name] = &layers[n]
+			}
+			if pslice, ok := parents[layer.Parent]; ok {
+				parents[layer.Parent] = append(pslice, &layers[n])
+			} else {
+				parents[layer.Parent] = []*Layer{&layers[n]}
+			}
+		}
+	}
+	mpath := r.mountspath()
+	data, err = ioutil.ReadFile(mpath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	layerMounts := []layerMountPoint{}
+	if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
+		for _, mount := range layerMounts {
+			if mount.MountPoint != "" {
+				if layer, ok := ids[mount.ID]; ok {
+					mounts[mount.MountPoint] = layer
+					layer.MountPoint = mount.MountPoint
+					layer.MountCount = mount.MountCount
+				}
+			}
+		}
+	}
+	r.layers = layers
+	r.byid = ids
+	r.byname = names
+	r.byparent = parents
+	r.bymount = mounts
+	err = nil
+	// Last step: try to remove anything that a previous user of this
+	// storage area marked for deletion but didn't manage to actually
+	// delete.
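+	// (Put sets the "incomplete" flag before applying a layer's diff and
+	// clears it only after the diff has been applied and saved, so any
+	// layer still carrying the flag here was left behind by an interrupted
+	// create and is safe to delete.)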
+ for _, layer := range r.layers { + if cleanup, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := cleanup.(bool); ok && b { + err = r.Delete(layer.ID) + if err != nil { + break + } + needSave = true + } + } + } + if needSave { + r.Touch() + return r.Save() + } + return err +} + +func (r *layerStore) Save() error { + rpath := r.layerspath() + jldata, err := json.Marshal(&r.layers) + if err != nil { + return err + } + mpath := r.mountspath() + mounts := []layerMountPoint{} + for _, layer := range r.layers { + if layer.MountPoint != "" && layer.MountCount > 0 { + mounts = append(mounts, layerMountPoint{ + ID: layer.ID, + MountPoint: layer.MountPoint, + MountCount: layer.MountCount, + }) + } + } + jmdata, err := json.Marshal(&mounts) + if err != nil { + return err + } + if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { + return err + } + return ioutils.AtomicWriteFile(mpath, jmdata, 0600) +} + +func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { + if err := os.MkdirAll(rundir, 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(layerdir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + rlstore := layerStore{ + lockfile: lockfile, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + byparent: make(map[string][]*Layer), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func (r *layerStore) ClearFlag(id string, flag string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + layer := r.byid[id] + delete(layer.Flags, flag) + return r.Save() +} + +func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + layer := r.byid[id] + layer.Flags[flag] = value + return r.Save() +} + +func (r *layerStore) Status() ([][2]string, error) { + return r.driver.Status(), nil +} + +func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) { + size = -1 + if err := os.MkdirAll(r.rundir, 0700); err != nil { + return nil, -1, err + } + if err := os.MkdirAll(r.layerdir, 0700); err != nil { + return nil, -1, err + } + if parentLayer, ok := r.byname[parent]; ok { + parent = parentLayer.ID + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, -1, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, -1, ErrDuplicateName + } + } + if writeable { + err = r.driver.CreateReadWrite(id, parent, mountLabel, options) + } else { + err = r.driver.Create(id, parent, mountLabel, options) + } + if err == nil { + newLayer := Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Flags: make(map[string]interface{}), + } + r.layers = append(r.layers, newLayer) + layer = &r.layers[len(r.layers)-1] + r.byid[id] = layer + for _, name := range names { 
+ r.byname[name] = layer + } + if pslice, ok := r.byparent[parent]; ok { + pslice = append(pslice, layer) + r.byparent[parent] = pslice + } else { + r.byparent[parent] = []*Layer{layer} + } + for flag, value := range flags { + layer.Flags[flag] = value + } + if diff != nil { + layer.Flags[incompleteFlag] = true + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + size, err = r.ApplyDiff(layer.ID, diff) + if err != nil { + if r.Delete(layer.ID) != nil { + // Either a driver error or an error saving. + // We now have a layer that's been marked for + // deletion but which we failed to remove. + } + return nil, -1, err + } + delete(layer.Flags, incompleteFlag) + } + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + } + return layer, size, err +} + +func (r *layerStore) CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { + layer, _, err = r.Put(id, parent, names, mountLabel, options, writeable, flags, nil) + return layer, err +} + +func (r *layerStore) Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (layer *Layer, err error) { + return r.CreateWithFlags(id, parent, names, mountLabel, options, writeable, nil) +} + +func (r *layerStore) Mount(id, mountLabel string) (string, error) { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return "", ErrLayerUnknown + } + layer := r.byid[id] + if layer.MountCount > 0 { + layer.MountCount++ + return layer.MountPoint, r.Save() + } + if mountLabel == "" { + if layer, ok := r.byid[id]; ok { + mountLabel = layer.MountLabel + } + } + mountpoint, err := r.driver.Get(id, mountLabel) + if mountpoint != "" && err == nil { + if layer, ok := r.byid[id]; ok { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountPoint = filepath.Clean(mountpoint) + layer.MountCount++ + r.bymount[layer.MountPoint] = layer + err = r.Save() + } + } + return mountpoint, err +} + +func (r *layerStore) Unmount(id string) error { + if layer, ok := r.bymount[filepath.Clean(id)]; ok { + id = layer.ID + } + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + layer := r.byid[id] + if layer.MountCount > 1 { + layer.MountCount-- + return r.Save() + } + err := r.driver.Put(id) + if err == nil || os.IsNotExist(err) { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountCount-- + layer.MountPoint = "" + err = r.Save() + } + return err +} + +func (r *layerStore) removeName(layer *Layer, name string) { + newNames := []string{} + for _, oldName := range layer.Names { + if oldName != name { + newNames = append(newNames, oldName) + } + } + layer.Names = newNames +} + +func (r *layerStore) SetNames(id string, names []string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if layer, ok := r.byid[id]; ok { + for _, name := range layer.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) + } + r.byname[name] = layer + } + layer.Names = names + return r.Save() + } + return ErrLayerUnknown +} + +func (r 
*layerStore) GetMetadata(id string) (string, error) { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if layer, ok := r.byid[id]; ok { + return layer.Metadata, nil + } + return "", ErrLayerUnknown +} + +func (r *layerStore) SetMetadata(id, metadata string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if layer, ok := r.byid[id]; ok { + layer.Metadata = metadata + return r.Save() + } + return ErrLayerUnknown +} + +func (r *layerStore) tspath(id string) string { + return filepath.Join(r.layerdir, id+tarSplitSuffix) +} + +func (r *layerStore) Delete(id string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + for r.byid[id].MountCount > 0 { + if err := r.Unmount(id); err != nil { + return err + } + } + err := r.driver.Remove(id) + if err == nil { + os.Remove(r.tspath(id)) + if layer, ok := r.byid[id]; ok { + pslice := r.byparent[layer.Parent] + newPslice := []*Layer{} + for _, candidate := range pslice { + if candidate.ID != id { + newPslice = append(newPslice, candidate) + } + } + delete(r.byid, layer.ID) + if len(newPslice) > 0 { + r.byparent[layer.Parent] = newPslice + } else { + delete(r.byparent, layer.Parent) + } + for _, name := range layer.Names { + delete(r.byname, name) + } + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + newLayers := []Layer{} + for _, candidate := range r.layers { + if candidate.ID != id { + newLayers = append(newLayers, candidate) + } + } + r.layers = newLayers + if err = r.Save(); err != nil { + return err + } + } + } + return err +} + +func (r *layerStore) Lookup(name string) (id string, err error) { + layer, ok := r.byname[name] + if !ok { + layer, ok = r.byid[name] + if !ok { + return "", ErrLayerUnknown + } + } + return layer.ID, nil +} + +func (r *layerStore) Exists(id string) bool { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + l, exists := r.byid[id] + return l != nil && exists +} + +func (r *layerStore) Get(id string) (*Layer, error) { + if l, ok := r.byname[id]; ok { + return l, nil + } + if l, ok := r.byid[id]; ok { + return l, nil + } + return nil, ErrLayerUnknown +} + +func (r *layerStore) Wipe() error { + ids := []string{} + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { + if layer, ok := r.byname[from]; ok { + from = layer.ID + } + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + if from == "" { + if layer, ok := r.byid[to]; ok { + from = layer.Parent + } + } + if to == "" { + return nil, ErrLayerUnknown + } + if _, ok := r.byid[to]; !ok { + return nil, ErrLayerUnknown + } + return r.driver.Changes(to, from) +} + +type simpleGetCloser struct { + r *layerStore + path string + id string +} + +func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) { + return os.Open(filepath.Join(s.path, path)) +} + +func (s *simpleGetCloser) Close() error { + return s.r.Unmount(s.id) +} + +func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { + if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { + return getter.DiffGetter(id) + } + path, err := r.Mount(id, "") + if err != nil { + return nil, err + } + return &simpleGetCloser{ + r: r, + path: path, + id: id, + }, nil +} + +func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) { + var metadata storage.Unpacker + + if layer, ok := 
r.byname[from]; ok { + from = layer.ID + } + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + if from == "" { + if layer, ok := r.byid[to]; ok { + from = layer.Parent + } + } + if to == "" { + return nil, ErrParentUnknown + } + if _, ok := r.byid[to]; !ok { + return nil, ErrLayerUnknown + } + compression := archive.Uncompressed + if cflag, ok := r.byid[to].Flags[compressionFlag]; ok { + if ctype, ok := cflag.(float64); ok { + compression = archive.Compression(ctype) + } else if ctype, ok := cflag.(archive.Compression); ok { + compression = archive.Compression(ctype) + } + } + if from != r.byid[to].Parent { + diff, err := r.driver.Diff(to, from) + if err == nil && (compression != archive.Uncompressed) { + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + diff.Close() + pwriter.Close() + return nil, err + } + go func() { + io.Copy(compressor, diff) + diff.Close() + compressor.Close() + pwriter.Close() + }() + diff = preader + } + return diff, err + } + + tsfile, err := os.Open(r.tspath(to)) + if err != nil { + if os.IsNotExist(err) { + return r.driver.Diff(to, from) + } + return nil, err + } + defer tsfile.Close() + + decompressor, err := gzip.NewReader(tsfile) + if err != nil { + return nil, err + } + defer decompressor.Close() + + tsbytes, err := ioutil.ReadAll(decompressor) + if err != nil { + return nil, err + } + + metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes)) + + if fgetter, err := r.newFileGetter(to); err != nil { + return nil, err + } else { + var stream io.ReadCloser + if compression != archive.Uncompressed { + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + fgetter.Close() + pwriter.Close() + preader.Close() + return nil, err + } + go func() { + asm.WriteOutputTarStream(fgetter, metadata, compressor) + compressor.Close() + pwriter.Close() + }() + stream = preader + } else { + stream = asm.NewOutputTarStream(fgetter, metadata) + } + return ioutils.NewReadCloserWrapper(stream, func() error { + err1 := stream.Close() + err2 := fgetter.Close() + if err2 == nil { + return err1 + } + return err2 + }), nil + } +} + +func (r *layerStore) DiffSize(from, to string) (size int64, err error) { + if layer, ok := r.byname[from]; ok { + from = layer.ID + } + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + if from == "" { + if layer, ok := r.byid[to]; ok { + from = layer.Parent + } + } + if to == "" { + return -1, ErrParentUnknown + } + if _, ok := r.byid[to]; !ok { + return -1, ErrLayerUnknown + } + return r.driver.DiffSize(to, from) +} + +func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) { + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + layer, ok := r.byid[to] + if !ok { + return -1, ErrLayerUnknown + } + + header := make([]byte, 10240) + n, err := diff.Read(header) + if err != nil && err != io.EOF { + return -1, err + } + + compression := archive.DetectCompression(header[:n]) + defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + + tsdata := bytes.Buffer{} + compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed) + if err != nil { + compressor = gzip.NewWriter(&tsdata) + } + metadata := storage.NewJSONPacker(compressor) + if c, err := archive.DecompressStream(defragmented); err != nil { + return -1, err + } else { + defragmented = c + } + payload, err := asm.NewInputTarStream(defragmented, metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, 
err + } + size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload) + compressor.Close() + if err == nil { + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + return -1, err + } + } + + if compression != archive.Uncompressed { + layer.Flags[compressionFlag] = compression + } else { + delete(layer.Flags, compressionFlag) + } + + return size, err +} + +func (r *layerStore) Lock() { + r.lockfile.Lock() +} + +func (r *layerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *layerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *layerStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *layerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} diff --git a/vendor/github.com/containers/storage/storage/lockfile.go b/vendor/github.com/containers/storage/storage/lockfile.go new file mode 100644 index 00000000..5cedea8d --- /dev/null +++ b/vendor/github.com/containers/storage/storage/lockfile.go @@ -0,0 +1,138 @@ +package storage + +import ( + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/containers/storage/pkg/stringid" +) + +// A Locker represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +type Locker interface { + sync.Locker + + // Touch records, for others sharing the lock, that the caller was the + // last writer. It should only be called with the lock held. + Touch() error + + // Modified() checks if the most recent writer was a party other than the + // last recorded writer. It should only be called with the lock held. + Modified() (bool, error) + + // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. + TouchedSince(when time.Time) bool +} + +type lockfile struct { + mu sync.Mutex + file string + fd uintptr + lw string +} + +var ( + lockfiles map[string]*lockfile + lockfilesLock sync.Mutex +) + +// GetLockfile opens a lock file, creating it if necessary. The Locker object +// returned will be unlocked.
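The Locker here pairs an in-process sync.Mutex with an fcntl(2) write lock on the file, while Touch() and Modified() let cooperating processes notice each other's writes. A minimal sketch of the intended calling pattern (the lock path is hypothetical, and this is an illustration rather than code from the diff):

    package main

    import (
        "log"

        "github.com/containers/storage/storage"
    )

    func main() {
        lock, err := storage.GetLockfile("/var/lib/containers/example.lock") // hypothetical path
        if err != nil {
            log.Fatal(err)
        }
        lock.Lock()
        defer lock.Unlock()
        if modified, err := lock.Modified(); err == nil && modified {
            // another process was the last writer; reload any cached state here
        }
        // ... modify whatever the lock protects ...
        if err := lock.Touch(); err != nil { // record ourselves as the last writer
            log.Println(err)
        }
    }

This Lock/Modified-then-reload, mutate, Touch/Unlock sequence is exactly the pattern the store methods later in this diff follow.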
+func GetLockfile(path string) (Locker, error) { + lockfilesLock.Lock() + defer lockfilesLock.Unlock() + if lockfiles == nil { + lockfiles = make(map[string]*lockfile) + } + if locker, ok := lockfiles[filepath.Clean(path)]; ok { + return locker, nil + } + fd, err := syscall.Open(filepath.Clean(path), os.O_RDWR|os.O_CREATE, syscall.S_IRUSR|syscall.S_IWUSR) + if err != nil { + return nil, err + } + locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID()} + lockfiles[filepath.Clean(path)] = locker + return locker, nil +} + +func (l *lockfile) Lock() { + lk := syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, + Pid: int32(os.Getpid()), + } + l.mu.Lock() + for syscall.FcntlFlock(l.fd, syscall.F_SETLKW, &lk) != nil { + time.Sleep(10 * time.Millisecond) + } +} + +func (l *lockfile) Unlock() { + lk := syscall.Flock_t{ + Type: syscall.F_UNLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, + Pid: int32(os.Getpid()), + } + for syscall.FcntlFlock(l.fd, syscall.F_SETLKW, &lk) != nil { + time.Sleep(10 * time.Millisecond) + } + l.mu.Unlock() +} + +func (l *lockfile) Touch() error { + l.lw = stringid.GenerateRandomID() + id := []byte(l.lw) + _, err := syscall.Seek(int(l.fd), 0, os.SEEK_SET) + if err != nil { + return err + } + n, err := syscall.Write(int(l.fd), id) + if err != nil { + return err + } + if n != len(id) { + return syscall.ENOSPC + } + err = syscall.Fsync(int(l.fd)) + if err != nil { + return err + } + return nil +} + +func (l *lockfile) Modified() (bool, error) { + id := []byte(l.lw) + _, err := syscall.Seek(int(l.fd), 0, os.SEEK_SET) + if err != nil { + return true, err + } + n, err := syscall.Read(int(l.fd), id) + if err != nil { + return true, err + } + if n != len(id) { + return true, syscall.ENOSPC + } + lw := l.lw + l.lw = string(id) + return l.lw != lw, nil +} + +func (l *lockfile) TouchedSince(when time.Time) bool { + st := syscall.Stat_t{} + err := syscall.Fstat(int(l.fd), &st) + if err != nil { + return true + } + touched := time.Unix(st.Mtim.Unix()) + return when.Before(touched) +} diff --git a/vendor/github.com/containers/storage/storage/store.go b/vendor/github.com/containers/storage/storage/store.go new file mode 100644 index 00000000..4480eaee --- /dev/null +++ b/vendor/github.com/containers/storage/storage/store.go @@ -0,0 +1,2171 @@ +package storage + +import ( + "encoding/base64" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + // register all of the built-in drivers + _ "github.com/containers/storage/drivers/register" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/storageversion" +) + +var ( + ErrLoadError = errors.New("error loading storage metadata") + ErrDuplicateID = errors.New("that ID is already in use") + ErrDuplicateName = errors.New("that name is already in use") + ErrParentIsContainer = errors.New("would-be parent layer is a container") + ErrNotAContainer = errors.New("identifier is not a container") + ErrNotAnImage = errors.New("identifier is not an image") + ErrNotALayer = errors.New("identifier is not a layer") + ErrNotAnID = errors.New("identifier is not a layer, image, or container") + ErrLayerHasChildren = errors.New("layer has children") + ErrLayerUsedByImage = errors.New("layer is in use by an image") + 
ErrLayerUsedByContainer = errors.New("layer is in use by a container") + ErrImageUsedByContainer = errors.New("image is in use by a container") + ErrIncompleteOptions = errors.New("missing necessary StoreOptions") + ErrSizeUnknown = errors.New("size is not known") + DefaultStoreOptions StoreOptions + stores []*store + storesLock sync.Mutex +) + +// FileBasedStore wraps up the most common methods of the various types of file-based +// data stores that we implement. +type FileBasedStore interface { + Locker + + // Load reloads the contents of the store from disk. It should be called + // with the lock held. + Load() error + + // Save saves the contents of the store to disk. It should be called with + // the lock held, and Touch() should be called afterward before releasing the + // lock. + Save() error +} + +// MetadataStore wraps up methods for getting and setting metadata associated with IDs. +type MetadataStore interface { + // GetMetadata reads metadata associated with an item with the specified ID. + GetMetadata(id string) (string, error) + + // SetMetadata updates the metadata associated with the item with the specified ID. + SetMetadata(id, metadata string) error +} + +// A BigDataStore wraps up the most common methods of the various types of +// file-based lookaside stores that we implement. +type BigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated with this + // ID. + SetBigData(id, key string, data []byte) error + + // GetBigData retrieves a (potentially large) piece of data associated with + // this ID, if it has previously been set. + GetBigData(id, key string) ([]byte, error) + + // GetBigDataSize retrieves the size of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + GetBigDataSize(id, key string) (int64, error) + + // GetBigDataNames() returns a list of the names of previously-stored pieces of + // data. + GetBigDataNames(id string) ([]string, error) +} + +// A FlaggableStore can have flags set and cleared on items which it manages. +type FlaggableStore interface { + // ClearFlag removes a named flag from an item in the store. + ClearFlag(id string, flag string) error + + // SetFlag sets a named flag and its value on an item in the store. + SetFlag(id string, flag string, value interface{}) error +} + +// StoreOptions is used for passing initialization options to GetStore(), for +// initializing a Store object and the underlying storage that it controls. +type StoreOptions struct { + // RunRoot is the filesystem path under which we can store run-time + // information, such as the locations of active mount points, that we + // want to lose if the host is rebooted. + RunRoot string `json:"runroot,omitempty"` + // GraphRoot is the filesystem path under which we will store the + // contents of layers, images, and containers. + GraphRoot string `json:"root,omitempty"` + // GraphDriverName is the underlying storage driver that we'll be + // using. It only needs to be specified the first time a Store is + // initialized for a given RunRoot and GraphRoot. + GraphDriverName string `json:"driver,omitempty"` + // GraphDriverOptions are driver-specific options. + GraphDriverOptions []string `json:"driver-options,omitempty"` + // UidMap and GidMap are used mainly for deciding on the ownership of + // files in layers as they're stored on disk, which is often necessary + // when user namespaces are being used. 
+ UidMap []idtools.IDMap `json:"uidmap,omitempty"` + GidMap []idtools.IDMap `json:"gidmap,omitempty"` +} + +// Store wraps up the various types of file-based stores that we use into a +// singleton object that initializes and manages them all together. +type Store interface { + // GetRunRoot, GetGraphRoot, GetGraphDriverName, and GetGraphOptions retrieve + // settings that were passed to GetStore() when the object was created. + GetRunRoot() string + GetGraphRoot() string + GetGraphDriverName() string + GetGraphOptions() []string + + // GetGraphDriver obtains and returns a handle to the graph Driver object used + // by the Store. + GetGraphDriver() (drivers.Driver, error) + + // GetLayerStore obtains and returns a handle to the layer store object used by + // the Store. + GetLayerStore() (LayerStore, error) + + // GetImageStore obtains and returns a handle to the image store object used by + // the Store. + GetImageStore() (ImageStore, error) + + // GetContainerStore obtains and returns a handle to the container store object + // used by the Store. + GetContainerStore() (ContainerStore, error) + + // CreateLayer creates a new layer in the underlying storage driver, optionally + // having the specified ID (one will be assigned if none is specified), with + // the specified layer (or no layer) as its parent, and with optional names. + // (The writeable flag is ignored.) + CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) + + // PutLayer combines the functions of CreateLayer and ApplyDiff, marking the + // layer for automatic removal if applying the diff fails for any reason. + PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) + + // CreateImage creates a new image, optionally with the specified ID + // (one will be assigned if none is specified), with optional names, + // referring to a specified layer, and with optional metadata. An + // image is a record which associates the ID of a layer with + // additional bookkeeping information which the library stores for the + // convenience of its caller. + CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) + + // CreateContainer creates a new container, optionally with the specified ID + // (one will be assigned if none is specified), with optional names, + // using the specified image's top layer as the basis for the + // container's layer, and assigning the specified ID to that layer (one + // will be created if none is specified). A container is a layer which + // is associated with additional bookkeeping information which the + // library stores for the convenience of its caller. + CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + + // GetMetadata retrieves the metadata which is associated with a layer, image, + // or container (whichever the passed-in ID refers to). + GetMetadata(id string) (string, error) + + // SetMetadata updates the metadata which is associated with a layer, image, or + // container (whichever the passed-in ID refers to) to match the specified + // value. The metadata value can be retrieved at any time using GetMetadata, + // or using GetLayer, GetImage, or GetContainer and reading the object directly. + SetMetadata(id, metadata string) error + + // Exists checks if there is a layer, image, or container which has the + // passed-in ID or name.
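The Create* methods build on one another: an image records a top layer plus bookkeeping data, and a container gets a fresh layer based on an image's top layer. A sketch of the chain, assuming a store obtained from GetStore() and letting the library assign all IDs (names and metadata strings are illustrative; log.Fatal stands in for real error handling):

    // layer -> image -> container, with "" asking the library to generate IDs
    layer, err := store.CreateLayer("", "", []string{"example-base"}, "", true)
    if err != nil {
        log.Fatal(err)
    }
    image, err := store.CreateImage("", []string{"example-image"}, layer.ID, "example metadata", nil)
    if err != nil {
        log.Fatal(err)
    }
    container, err := store.CreateContainer("", []string{"example-container"}, image.ID, "", "", nil)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("created container %s", container.ID)

CreateContainer's empty layer argument mirrors the ID behavior: the container's layer is created on demand on top of the image's top layer.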
+ Exists(id string) bool + + // Status asks for a status report, in the form of key-value pairs, from the + // underlying storage driver. The contents vary from driver to driver. + Status() ([][2]string, error) + + // Delete removes the layer, image, or container which has the passed-in ID or + // name. Note that no safety checks are performed, so this can leave images + // with references to layers which do not exist, and layers with references to + // parents which no longer exist. + Delete(id string) error + + // DeleteLayer attempts to remove the specified layer. If the layer is the + // parent of any other layer, or is referred to by any images, it will return + // an error. + DeleteLayer(id string) error + + // DeleteImage removes the specified image if it is not referred to by + // any containers. If its top layer is then no longer referred to by + // any other images and is not the parent of any other layers, its top + // layer will be removed. If that layer's parent is no longer referred + // to by any other images and is not the parent of any other layers, + // then it, too, will be removed. This procedure will be repeated + // until a layer which should not be removed, or the base layer, is + // reached, at which point the list of removed layers is returned. If + // the commit argument is false, the image and layers are not removed, + // but the list of layers which would be removed is still returned. + DeleteImage(id string, commit bool) (layers []string, err error) + + // DeleteContainer removes the specified container and its layer. If there is + // no matching container, or if the container exists but its layer does not, an + // error will be returned. + DeleteContainer(id string) error + + // Wipe removes all known layers, images, and containers. + Wipe() error + + // Mount attempts to mount a layer, image, or container for access, and returns + // the pathname if it succeeds. + Mount(id, mountLabel string) (string, error) + + // Unmount attempts to unmount a layer, image, or container, given an ID, a + // name, or a mount path. + Unmount(id string) error + + // Changes returns a summary of the changes which would need to be made to one + // layer to make its contents the same as a second layer. If the first layer + // is not specified, the second layer's parent is assumed. Each Change + // structure contains a Path relative to the layer's root directory, and a Kind + // which is either ChangeAdd, ChangeModify, or ChangeDelete. + Changes(from, to string) ([]archive.Change, error) + + // DiffSize returns a count of the size of the tarstream which would specify + // the changes returned by Changes. + DiffSize(from, to string) (int64, error) + + // Diff returns the tarstream which would specify the changes returned by + // Changes. + Diff(from, to string) (io.ReadCloser, error) + + // ApplyDiff applies a tarstream to a layer. Information about the tarstream + // is cached with the layer. Typically, a layer which is populated using a + // tarstream will be expected to not be modified in any other way, either + // before or after the diff is applied. + ApplyDiff(to string, diff archive.Reader) (int64, error) + + // Layers returns a list of the currently known layers. + Layers() ([]Layer, error) + + // Images returns a list of the currently known images. + Images() ([]Image, error) + + // Containers returns a list of the currently known containers. + Containers() ([]Container, error) + + // GetNames returns the list of names for a layer, image, or container. 
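Changes, Diff, DiffSize, and ApplyDiff expose the graph driver's layer-diff machinery, which means a layer's contents can be exported from one store and replayed into another. A sketch, under the assumption that src and dst are two Store values and layerID names a layer in src:

    rc, err := src.Diff("", layerID) // an empty "from" diffs against the layer's parent
    if err != nil {
        log.Fatal(err)
    }
    defer rc.Close()
    newLayer, err := dst.CreateLayer("", "", nil, "", true)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := dst.ApplyDiff(newLayer.ID, rc); err != nil { // also caches tar-split data alongside the layer
        log.Fatal(err)
    }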
+ GetNames(id string) ([]string, error) + + // SetNames changes the list of names for a layer, image, or container. + SetNames(id string, names []string) error + + // ListImageBigData retrieves a list of the (possibly large) chunks of named + // data associated with an image. + ListImageBigData(id string) ([]string, error) + + // GetImageBigData retrieves a (possibly large) chunk of named data associated + // with an image. + GetImageBigData(id, key string) ([]byte, error) + + // GetImageBigDataSize retrieves the size of a (possibly large) chunk + // of named data associated with an image. + GetImageBigDataSize(id, key string) (int64, error) + + // SetImageBigData stores a (possibly large) chunk of named data associated + // with an image. + SetImageBigData(id, key string, data []byte) error + + // ListContainerBigData retrieves a list of the (possibly large) chunks of + // named data associated with a container. + ListContainerBigData(id string) ([]string, error) + + // GetContainerBigData retrieves a (possibly large) chunk of named data + // associated with a container. + GetContainerBigData(id, key string) ([]byte, error) + + // GetContainerBigDataSize retrieves the size of a (possibly large) + // chunk of named data associated with a container. + GetContainerBigDataSize(id, key string) (int64, error) + + // SetContainerBigData stores a (possibly large) chunk of named data + // associated with a container. + SetContainerBigData(id, key string, data []byte) error + + // GetLayer returns a specific layer. + GetLayer(id string) (*Layer, error) + + // GetImage returns a specific image. + GetImage(id string) (*Image, error) + + // GetImagesByTopLayer returns a list of images which reference the specified + // layer as their top layer. They will have different IDs and names + // and may have different metadata, big data items, and flags. + GetImagesByTopLayer(id string) ([]*Image, error) + + // GetContainer returns a specific container. + GetContainer(id string) (*Container, error) + + // GetContainerByLayer returns a specific container based on its layer ID or + // name. + GetContainerByLayer(id string) (*Container, error) + + // GetContainerDirectory returns a path of a directory which the caller + // can use to store data, specific to the container, which the library + // does not directly manage. The directory will be deleted when the + // container is deleted. + GetContainerDirectory(id string) (string, error) + + // SetContainerDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // directory. + SetContainerDirectoryFile(id, file string, data []byte) error + + // GetFromContainerDirectory is a convenience function which reads + // the contents of the specified file relative to the container's + // directory. + GetFromContainerDirectory(id, file string) ([]byte, error) + + // GetContainerRunDirectory returns a path of a directory which the + // caller can use to store data, specific to the container, which the + // library does not directly manage. The directory will be deleted + // when the host system is restarted. + GetContainerRunDirectory(id string) (string, error) + + // SetContainerRunDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // run directory. 
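The container-directory helpers documented here give callers a scratch area whose lifetime is tied to the container; the run-directory variants are additionally cleared on reboot, since they live under RunRoot. A short sketch, assuming store is a Store and id names an existing container:

    // persists until the container is deleted
    if err := store.SetContainerDirectoryFile(id, "config/app.json", []byte(`{"debug":true}`)); err != nil {
        log.Fatal(err)
    }
    data, err := store.GetFromContainerDirectory(id, "config/app.json")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("read back: %s", data)

    // goes away when the host reboots
    if err := store.SetContainerRunDirectoryFile(id, "pidfile", []byte("12345")); err != nil {
        log.Fatal(err)
    }

Nested file names work because the implementations MkdirAll the file's parent directory before writing.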
+ SetContainerRunDirectoryFile(id, file string, data []byte) error + + // GetFromContainerRunDirectory is a convenience function which reads + // the contents of the specified file relative to the container's run + // directory. + GetFromContainerRunDirectory(id, file string) ([]byte, error) + + // Lookup returns the ID of a layer, image, or container with the specified + // name or ID. + Lookup(name string) (string, error) + + // Shutdown attempts to free any kernel resources which are being used + // by the underlying driver. If "force" is true, any mounted (i.e., in + // use) layers are unmounted beforehand. If "force" is not true, then + // layers being in use is considered to be an error condition. A list + // of still-mounted layers is returned along with possible errors. + Shutdown(force bool) (layers []string, err error) + + // Version returns version information, in the form of key-value pairs, from + // the storage package. + Version() ([][2]string, error) +} + +// ImageOptions is used for passing options to a Store's CreateImage() method. +type ImageOptions struct { +} + +// ContainerOptions is used for passing options to a Store's CreateContainer() method. +type ContainerOptions struct { +} + +type store struct { + lastLoaded time.Time + runRoot string + graphLock Locker + graphRoot string + graphDriverName string + graphOptions []string + uidMap []idtools.IDMap + gidMap []idtools.IDMap + graphDriver drivers.Driver + layerStore LayerStore + imageStore ImageStore + containerStore ContainerStore +} + +// GetStore attempts to find an already-created Store object matching the +// specified location and graph driver, and if it can't, it creates and +// initializes a new Store object, and the underlying storage that it controls. +func GetStore(options StoreOptions) (Store, error) { + if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { + options = DefaultStoreOptions + } + + if options.GraphRoot != "" { + options.GraphRoot = filepath.Clean(options.GraphRoot) + } + if options.RunRoot != "" { + options.RunRoot = filepath.Clean(options.RunRoot) + } + + storesLock.Lock() + defer storesLock.Unlock() + + for _, s := range stores { + if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + return s, nil + } + } + + if options.GraphRoot == "" { + return nil, ErrIncompleteOptions + } + if options.RunRoot == "" { + return nil, ErrIncompleteOptions + } + + if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + for _, subdir := range []string{} { + if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + } + if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} { + if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + } + + graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock")) + if err != nil { + return nil, err + } + s := &store{ + runRoot: options.RunRoot, + graphLock: graphLock, + graphRoot: options.GraphRoot, + graphDriverName: options.GraphDriverName, + graphOptions: options.GraphDriverOptions, + uidMap: copyIDMap(options.UidMap), + gidMap: copyIDMap(options.GidMap), + } + if err := s.load(); err != nil { + return 
nil, err + } + + stores = append(stores, s) + + return s, nil +} + +func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap { + m := []idtools.IDMap{} + if idmap != nil { + m = make([]idtools.IDMap, len(idmap)) + copy(m, idmap) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func (s *store) GetRunRoot() string { + return s.runRoot +} + +func (s *store) GetGraphDriverName() string { + return s.graphDriverName +} + +func (s *store) GetGraphRoot() string { + return s.graphRoot +} + +func (s *store) GetGraphOptions() []string { + return s.graphOptions +} + +func (s *store) load() error { + driver, err := s.GetGraphDriver() + if err != nil { + return err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + driverPrefix := s.graphDriverName + "-" + + rls, err := s.GetLayerStore() + if err != nil { + return err + } + s.layerStore = rls + + gipath := filepath.Join(s.graphRoot, driverPrefix+"images") + if err := os.MkdirAll(gipath, 0700); err != nil { + return err + } + ris, err := newImageStore(gipath) + if err != nil { + return err + } + s.imageStore = ris + gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return err + } + rcs, err := newContainerStore(gcpath) + if err != nil { + return err + } + rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return err + } + s.containerStore = rcs + return nil +} + +func (s *store) getGraphDriver() (drivers.Driver, error) { + if s.graphDriver != nil { + return s.graphDriver, nil + } + driver, err := drivers.New(s.graphRoot, s.graphDriverName, s.graphOptions, s.uidMap, s.gidMap) + if err != nil { + return nil, err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + return driver, nil +} + +func (s *store) GetGraphDriver() (drivers.Driver, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + return s.getGraphDriver() +} + +func (s *store) GetLayerStore() (LayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + if s.layerStore != nil { + return s.layerStore, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") + if err := os.MkdirAll(glpath, 0700); err != nil { + return nil, err + } + rls, err := newLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.layerStore = rls + return s.layerStore, nil +} + +func (s *store) GetImageStore() (ImageStore, error) { + if s.imageStore != nil { + return s.imageStore, nil + } + return nil, ErrLoadError +} + +func (s *store) GetContainerStore() (ContainerStore, error) { + if s.containerStore != nil { + return s.containerStore, nil + } + return nil, ErrLoadError +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, -1, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, -1, err + } + rcstore, err := s.GetContainerStore() + 
if err != nil { + return nil, -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + defer ristore.Touch() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + if id == "" { + id = stringid.GenerateRandomID() + } + if parent != "" { + if l, err := rlstore.Get(parent); err == nil && l != nil { + parent = l.ID + } else { + return nil, -1, ErrLayerUnknown + } + containers, err := rcstore.Containers() + if err != nil { + return nil, -1, err + } + for _, container := range containers { + if container.LayerID == parent { + return nil, -1, ErrParentIsContainer + } + } + } + return rlstore.Put(id, parent, names, mountLabel, nil, writeable, nil, diff) +} + +func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) { + layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, nil) + return layer, err +} + +func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + defer ristore.Touch() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + if id == "" { + id = stringid.GenerateRandomID() + } + + ilayer, err := rlstore.Get(layer) + if err != nil { + return nil, err + } + if ilayer == nil { + return nil, ErrLayerUnknown + } + layer = ilayer.ID + return ristore.Create(id, names, layer, metadata) +} + +func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if id == "" { + id = stringid.GenerateRandomID() + } + + cimage, err := ristore.Get(image) + if err != nil { + return nil, err + } + if cimage == nil { + return nil, ErrImageUnknown + } + clayer, err := rlstore.Create(layer, cimage.TopLayer, nil, "", nil, true) + if err != nil { + return nil, err + } + layer = clayer.ID + container, err := rcstore.Create(id, names, cimage.ID, layer, metadata) + if err != nil || container == nil { + rlstore.Delete(layer) 
+ } + return container, err +} + +func (s *store) SetMetadata(id, metadata string) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rlstore.Exists(id) { + defer rlstore.Touch() + return rlstore.SetMetadata(id, metadata) + } + if ristore.Exists(id) { + defer ristore.Touch() + return ristore.SetMetadata(id, metadata) + } + if rcstore.Exists(id) { + defer rcstore.Touch() + return rcstore.SetMetadata(id, metadata) + } + return ErrNotAnID +} + +func (s *store) GetMetadata(id string) (string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rlstore.Exists(id) { + return rlstore.GetMetadata(id) + } + if ristore.Exists(id) { + return ristore.GetMetadata(id) + } + if rcstore.Exists(id) { + return rcstore.GetMetadata(id) + } + return "", ErrNotAnID +} + +func (s *store) ListImageBigData(id string) ([]string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.GetBigDataNames(id) +} + +func (s *store) GetImageBigDataSize(id, key string) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return -1, err + } + ristore, err := s.GetImageStore() + if err != nil { + return -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.GetBigDataSize(id, key) +} + +func (s *store) GetImageBigData(id, key string) ([]byte, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.GetBigData(id, key) +} + +func (s *store) SetImageBigData(id, key string, data 
[]byte) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.SetBigData(id, key, data) +} + +func (s *store) ListContainerBigData(id string) ([]string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.GetBigDataNames(id) +} + +func (s *store) GetContainerBigDataSize(id, key string) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return -1, err + } + ristore, err := s.GetImageStore() + if err != nil { + return -1, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.GetBigDataSize(id, key) +} + +func (s *store) GetContainerBigData(id, key string) ([]byte, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.GetBigData(id, key) +} + +func (s *store) SetContainerBigData(id, key string, data []byte) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.SetBigData(id, key, data) +} + +func (s *store) Exists(id string) bool { + rcstore, err := s.GetContainerStore() + if err != nil { + return false + } + ristore, 
err := s.GetImageStore() + if err != nil { + return false + } + rlstore, err := s.GetLayerStore() + if err != nil { + return false + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rcstore.Exists(id) { + return true + } + if ristore.Exists(id) { + return true + } + return rlstore.Exists(id) +} + +func (s *store) SetNames(id string, names []string) error { + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + deduped := []string{} + seen := make(map[string]bool) + for _, name := range names { + if _, wasSeen := seen[name]; !wasSeen { + seen[name] = true + deduped = append(deduped, name) + } + } + + if rlstore.Exists(id) { + return rlstore.SetNames(id, deduped) + } + if ristore.Exists(id) { + return ristore.SetNames(id, deduped) + } + if rcstore.Exists(id) { + return rcstore.SetNames(id, deduped) + } + return ErrLayerUnknown +} + +func (s *store) GetNames(id string) ([]string, error) { + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if l, err := rlstore.Get(id); l != nil && err == nil { + return l.Names, nil + } + if i, err := ristore.Get(id); i != nil && err == nil { + return i.Names, nil + } + if c, err := rcstore.Get(id); c != nil && err == nil { + return c.Names, nil + } + return nil, ErrLayerUnknown +} + +func (s *store) Lookup(name string) (string, error) { + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if l, err := rlstore.Get(name); l != nil && err == nil { + return l.ID, nil + } + if i, err := ristore.Get(name); i != nil && err == nil { + return i.ID, 
nil + } + if c, err := rcstore.Get(name); c != nil && err == nil { + return c.ID, nil + } + return "", ErrLayerUnknown +} + +func (s *store) DeleteLayer(id string) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rlstore.Exists(id) { + defer rlstore.Touch() + defer rcstore.Touch() + if l, err := rlstore.Get(id); err != nil { + id = l.ID + } + layers, err := rlstore.Layers() + if err != nil { + return err + } + for _, layer := range layers { + if layer.Parent == id { + return ErrLayerHasChildren + } + } + images, err := ristore.Images() + if err != nil { + return err + } + for _, image := range images { + if image.TopLayer == id { + return ErrLayerUsedByImage + } + } + containers, err := rcstore.Containers() + if err != nil { + return err + } + for _, container := range containers { + if container.LayerID == id { + return ErrLayerUsedByContainer + } + } + return rlstore.Delete(id) + } else { + return ErrNotALayer + } +} + +func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + layersToRemove := []string{} + if ristore.Exists(id) { + image, err := ristore.Get(id) + if err != nil { + return nil, err + } + id = image.ID + defer rlstore.Touch() + defer ristore.Touch() + containers, err := rcstore.Containers() + if err != nil { + return nil, err + } + aContainerByImage := make(map[string]string) + for _, container := range containers { + aContainerByImage[container.ImageID] = container.ID + } + if _, ok := aContainerByImage[id]; ok { + return nil, ErrImageUsedByContainer + } + images, err := ristore.Images() + if err != nil { + return nil, err + } + layers, err := rlstore.Layers() + if err != nil { + return nil, err + } + childrenByParent := make(map[string]*[]string) + for _, layer := range layers { + parent := layer.Parent + if list, ok := childrenByParent[parent]; ok { + newList := append(*list, layer.ID) + childrenByParent[parent] = &newList + } else { + childrenByParent[parent] = &([]string{layer.ID}) + } + } + anyImageByTopLayer := make(map[string]string) + for _, img := range images { + if img.ID != id { + anyImageByTopLayer[img.TopLayer] = img.ID + } + } + if commit { + if err = ristore.Delete(id); err != nil { + return nil, err + } + } + layer := image.TopLayer + lastRemoved := "" + for layer != "" { + if rcstore.Exists(layer) { + break + } + if _, ok := anyImageByTopLayer[layer]; 
ok { + break + } + parent := "" + if l, err := rlstore.Get(layer); err == nil { + parent = l.Parent + } + otherRefs := 0 + if childList, ok := childrenByParent[layer]; ok && childList != nil { + children := *childList + for _, child := range children { + if child != lastRemoved { + otherRefs++ + } + } + } + if otherRefs != 0 { + break + } + lastRemoved = layer + layersToRemove = append(layersToRemove, lastRemoved) + layer = parent + } + } else { + return nil, ErrNotAnImage + } + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return nil, err + } + } + } + return layersToRemove, nil +} + +func (s *store) DeleteContainer(id string) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rcstore.Exists(id) { + defer rlstore.Touch() + defer rcstore.Touch() + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GetGraphRoot(), middleDir, container.ID) + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.GetRunRoot(), middleDir, container.ID) + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } else { + return ErrNotALayer + } + } + } else { + return ErrNotAContainer + } + return nil +} + +func (s *store) Delete(id string) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rcstore.Exists(id) { + defer rlstore.Touch() + defer rcstore.Touch() + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GetGraphRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.GetRunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } else { + return ErrNotALayer + } + } + } + if ristore.Exists(id) { + defer ristore.Touch() + return ristore.Delete(id) + } + if rlstore.Exists(id) { + defer rlstore.Touch() + return rlstore.Delete(id) + 
} + return ErrLayerUnknown +} + +func (s *store) Wipe() error { + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + defer ristore.Touch() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if err = rcstore.Wipe(); err != nil { + return err + } + if err = ristore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() +} + +func (s *store) Status() ([][2]string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + return rlstore.Status() +} + +func (s *store) Version() ([][2]string, error) { + return [][2]string{ + {"GitCommit", storageversion.GitCommit}, + {"Version", storageversion.Version}, + {"BuildTime", storageversion.BuildTime}, + }, nil +} + +func (s *store) Mount(id, mountLabel string) (string, error) { + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if c, err := rcstore.Get(id); c != nil && err == nil { + id = c.LayerID + } + return rlstore.Mount(id, mountLabel) +} + +func (s *store) Unmount(id string) error { + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if c, err := rcstore.Get(id); c != nil && err == nil { + id = c.LayerID + } + return rlstore.Unmount(id) +} + +func (s *store) Changes(from, to string) ([]archive.Change, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.Changes(from, to) +} + +func (s *store) DiffSize(from, to string) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.DiffSize(from, to) +} + +func (s *store) Diff(from, to string) (io.ReadCloser, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.Diff(from, to) +} + +func (s *store) ApplyDiff(to string, diff archive.Reader) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return 
-1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.ApplyDiff(to, diff) +} + +func (s *store) Layers() ([]Layer, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.Layers() +} + +func (s *store) Images() ([]Image, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.Images() +} + +func (s *store) Containers() ([]Container, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.Containers() +} + +func (s *store) GetLayer(id string) (*Layer, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.Get(id) +} + +func (s *store) GetImage(id string) (*Image, error) { + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.Get(id) +} + +func (s *store) GetImagesByTopLayer(id string) ([]*Image, error) { + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + layer, err := rlstore.Get(id) + if err != nil { + return nil, err + } + images := []*Image{} + imageList, err := ristore.Images() + if err != nil { + return nil, err + } + for _, image := range imageList { + if image.TopLayer == layer.ID { + image := image // copy before taking its address; &image of the loop variable would alias every entry + images = append(images, &image) + } + } + + return images, nil +} + +func (s *store) GetContainer(id string) (*Container, error) { + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return
nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.Get(id) +} + +func (s *store) GetContainerByLayer(id string) (*Container, error) { + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + layer, err := rlstore.Get(id) + if err != nil { + return nil, err + } + containerList, err := rcstore.Containers() + if err != nil { + return nil, err + } + for _, container := range containerList { + if container.LayerID == layer.ID { + return &container, nil + } + } + + return nil, ErrContainerUnknown +} + +func (s *store) GetContainerDirectory(id string) (string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GetGraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return "", err + } + return gcpath, nil +} + +func (s *store) GetContainerRunDirectory(id string) (string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.GetRunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return "", err + } + return rcpath, nil +} + +func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { + dir, err := s.GetContainerDirectory(id) + if err != nil { + 
return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) GetFromContainerDirectory(id, file string) ([]byte, error) { + dir, err := s.GetContainerDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { + dir, err := s.GetContainerRunDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) GetFromContainerRunDirectory(id, file string) ([]byte, error) { + dir, err := s.GetContainerRunDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) Shutdown(force bool) ([]string, error) { + mounted := []string{} + modified := false + + rlstore, err := s.GetLayerStore() + if err != nil { + return mounted, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + s.graphLock.Lock() + defer s.graphLock.Unlock() + layers, err := rlstore.Layers() + if err != nil { + return mounted, err + } + for _, layer := range layers { + if layer.MountCount == 0 { + continue + } + mounted = append(mounted, layer.ID) + if force { + for layer.MountCount > 0 { + err2 := rlstore.Unmount(layer.ID) + if err2 != nil { + if err == nil { + err = err2 + } + break + } + modified = true + } + } + } + if len(mounted) > 0 && err == nil { + err = ErrLayerUsedByContainer + } + if err == nil { + err = s.graphDriver.Cleanup() + s.graphLock.Touch() + modified = true + } + if modified { + rlstore.Touch() + } + return mounted, err +} + +// Convert a BigData key name into an acceptable file name. +func makeBigDataBaseName(key string) string { + reader := strings.NewReader(key) + for reader.Len() > 0 { + ch, size, err := reader.ReadRune() + if err != nil || size != 1 { + break + } + if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') { + break + } + } + if reader.Len() > 0 { + return "=" + base64.StdEncoding.EncodeToString([]byte(key)) + } + return key +} + +func init() { + DefaultStoreOptions.RunRoot = "/var/run/containers" + DefaultStoreOptions.GraphRoot = "/var/lib/containers" + DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") + DefaultStoreOptions.GraphDriverOptions = strings.Split(os.Getenv("STORAGE_OPTS"), ",") + if len(DefaultStoreOptions.GraphDriverOptions) == 1 && DefaultStoreOptions.GraphDriverOptions[0] == "" { + DefaultStoreOptions.GraphDriverOptions = nil + } +} diff --git a/vendor/github.com/containers/storage/storageversion/version_lib.go b/vendor/github.com/containers/storage/storageversion/version_lib.go new file mode 100644 index 00000000..761868e3 --- /dev/null +++ b/vendor/github.com/containers/storage/storageversion/version_lib.go @@ -0,0 +1,13 @@ +// +build !autogen + +// Package storageversion is auto-generated at build-time +package storageversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. 
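+// When the package is built as a vendored library, as it is here (no "autogen" +// build tag), these placeholder values are what callers will observe at run time.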
+const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" +) diff --git a/vendor/github.com/go-check/check/.travis.yml b/vendor/github.com/go-check/check/.travis.yml new file mode 100644 index 00000000..ead6735f --- /dev/null +++ b/vendor/github.com/go-check/check/.travis.yml @@ -0,0 +1,3 @@ +language: go + +go_import_path: gopkg.in/check.v1 diff --git a/vendor/github.com/mistifyio/go-zfs/.gitignore b/vendor/github.com/mistifyio/go-zfs/.gitignore new file mode 100644 index 00000000..8000dd9d --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/vendor/github.com/mistifyio/go-zfs/.travis.yml b/vendor/github.com/mistifyio/go-zfs/.travis.yml new file mode 100644 index 00000000..a8723a19 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/.travis.yml @@ -0,0 +1,41 @@ +language: go +sudo: required +dist: trusty + +branches: + only: + - master + +env: + - rel=0.6.4.2 + - rel=0.6.5.4 + +matrix: + allow_failures: + - env: rel=0.6.5.4 + +go: + - 1.5 + +before_install: + - MAKEFLAGS=-j$(($(grep -c '^processor' /proc/cpuinfo) * 2 + 1)) + - sudo apt-get update -y && sudo apt-get install -y linux-headers-$(uname -r) uuid-dev tree + - cd /tmp + - curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/spl-$rel.tar.gz | tar xz + - curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/zfs-$rel.tar.gz | tar xz + - (cd spl-$rel && ./configure --prefix=/usr && make && sudo make install) + - (cd zfs-$rel && ./configure --prefix=/usr && make && sudo make install) + - sudo modprobe zfs + - cd $TRAVIS_BUILD_DIR + - go get github.com/alecthomas/gometalinter + - gometalinter --install --update + +script: + - sudo -E $(which go) test -v ./... + - gometalinter --disable=golint --disable=vetshadow --enable=gofmt ./... || true + - gometalinter --disable-all --enable=golint --enable=vetshadow ./... || true + +notifications: + email: false + slack: + secure: "AbDJNjWyf/z+neX0HtoIUynjBcdvbhrsuyzoeaImZaanUtyo3cWNpA1M+5CApDQneaKbLqcehDBTjaLQD1fjXXtWrNvq+FgCRDJ1gvZasq13iJfYe3qtLz7n0YGHqEGzZ1lsheWtle/Sg32RlPAUZrHKWPciu7/Fg1k1ca8FsB4=" diff --git a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md new file mode 100644 index 00000000..f1880c19 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md @@ -0,0 +1,60 @@ +## How to Contribute ## + +We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute. + +### Reporting issues ### + +We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests. + +If you find a bug: + +* Use the GitHub issue search to check whether the bug has already been reported. +* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository. +* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug. + +### Pull requests ### + +We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request. 
+ +[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote: + + $ git clone git@github.com:/go-zfs.git + $ cd go-zfs + $ git remote add upstream https://github.com/mistifyio/go-zfs.git + +If you need to pull new changes committed upstream: + + $ git checkout master + $ git fetch upstream + $ git merge upstream/master + +Don't work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature: + + $ git checkout -b + +Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message. + + $ git commit -m "Issue # - " + +Push your feature branch to your fork. + + $ git push origin + +[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes. + +* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/). +* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing). + +**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE). + +### Go Tools ### +For consistency and to catch minor issues in all Go code, please run the following: +* goimports +* go vet +* golint +* errcheck + +Many editors can execute the above on save. + +---- +Guidelines based on http://azkaban.github.io/contributing.html diff --git a/vendor/github.com/mistifyio/go-zfs/LICENSE b/vendor/github.com/mistifyio/go-zfs/LICENSE new file mode 100644 index 00000000..f4c265cf --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2014, OmniTI Computer Consulting, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/mistifyio/go-zfs/README.md b/vendor/github.com/mistifyio/go-zfs/README.md new file mode 100644 index 00000000..2515e588 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/README.md @@ -0,0 +1,54 @@ +# Go Wrapper for ZFS # + +Simple wrappers for ZFS command line tools. + +[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs) + +## Requirements ## + +You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS: + + sudo apt-get install python-software-properties + sudo apt-add-repository ppa:zfs-native/stable + sudo apt-get update + sudo apt-get install ubuntu-zfs libzfs-dev + +Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go, use http://golang.org/doc/install + +Generally you need root privileges to use anything zfs related. 
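As a quick sanity check of a working setup, a minimal sketch along these lines (assuming at least one pool is already imported and the program runs as root) lists every dataset through this package:

```go
package main

import (
	"fmt"

	"github.com/mistifyio/go-zfs"
)

func main() {
	// An empty filter selects every dataset: filesystems, snapshots, and volumes.
	datasets, err := zfs.Datasets("")
	if err != nil {
		panic(err)
	}
	for _, ds := range datasets {
		fmt.Printf("%s\t%s\t%d bytes used\n", ds.Name, ds.Type, ds.Used)
	}
}
```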
+ +## Status ## + +This has only been tested on Ubuntu 14.04. + +In the future, we hope to work directly with libzfs. + +# Hacking # + +The tests have decent examples for most functions. + +```go +//assuming a zpool named test +//error handling omitted + + +f, err := zfs.CreateFilesystem("test/snapshot-test", nil) +ok(t, err) + +s, err := f.Snapshot("test", nil) +ok(t, err) + +// snapshot is named "test/snapshot-test@test" + +c, err := s.Clone("test/clone-test", nil) + +err = c.Destroy() +err = s.Destroy() +err = f.Destroy() + +``` + +# Contributing # + +See the [contributing guidelines](./CONTRIBUTING.md) + diff --git a/vendor/github.com/mistifyio/go-zfs/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/Vagrantfile new file mode 100644 index 00000000..3bd6e120 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/Vagrantfile @@ -0,0 +1,34 @@ + +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "ubuntu/trusty64" + config.ssh.forward_agent = true + + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true + + config.vm.provision "shell", inline: <<EOF +cat << END > /etc/profile.d/go.sh +export GOPATH=\\$HOME/go +export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH +END + +chown -R vagrant /home/vagrant/go + +apt-get update +apt-get install -y software-properties-common curl +apt-add-repository --yes ppa:zfs-native/stable +apt-get update +apt-get install -y ubuntu-zfs + +cd /home/vagrant +curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz +tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz + +cat << END > /etc/sudoers.d/go +Defaults env_keep += "GOPATH" +END + +EOF + +end diff --git a/vendor/github.com/mistifyio/go-zfs/error.go b/vendor/github.com/mistifyio/go-zfs/error.go new file mode 100644 index 00000000..5408ccdb --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/error.go @@ -0,0 +1,18 @@ +package zfs + +import ( + "fmt" +) + +// Error is an error which is returned when the `zfs` or `zpool` shell +// commands return with a non-zero exit code. +type Error struct { + Err error + Debug string + Stderr string +} + +// Error returns the string representation of an Error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr) +} diff --git a/vendor/github.com/mistifyio/go-zfs/utils.go b/vendor/github.com/mistifyio/go-zfs/utils.go new file mode 100644 index 00000000..c7178d6f --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/utils.go @@ -0,0 +1,352 @@ +package zfs + +import ( + "bytes" + "errors" + "fmt" + "io" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/pborman/uuid" +) + +type command struct { + Command string + Stdin io.Reader + Stdout io.Writer +} + +func (c *command) Run(arg ...string) ([][]string, error) { + + cmd := exec.Command(c.Command, arg...)
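+ // Stdout is captured into a local buffer below only when the caller did not + // supply c.Stdout; stderr is always buffered so it can be reported in an + // Error when the command fails.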
+ + var stdout, stderr bytes.Buffer + + if c.Stdout == nil { + cmd.Stdout = &stdout + } else { + cmd.Stdout = c.Stdout + } + + if c.Stdin != nil { + cmd.Stdin = c.Stdin + + } + cmd.Stderr = &stderr + + id := uuid.New() + joinedArgs := strings.Join(cmd.Args, " ") + + logger.Log([]string{"ID:" + id, "START", joinedArgs}) + err := cmd.Run() + logger.Log([]string{"ID:" + id, "FINISH"}) + + if err != nil { + return nil, &Error{ + Err: err, + Debug: strings.Join([]string{cmd.Path, joinedArgs}, " "), + Stderr: stderr.String(), + } + } + + // assume if you passed in something for stdout, that you know what to do with it + if c.Stdout != nil { + return nil, nil + } + + lines := strings.Split(stdout.String(), "\n") + + //last line is always blank + lines = lines[0 : len(lines)-1] + output := make([][]string, len(lines)) + + for i, l := range lines { + output[i] = strings.Fields(l) + } + + return output, nil +} + +func setString(field *string, value string) { + v := "" + if value != "-" { + v = value + } + *field = v +} + +func setUint(field *uint64, value string) error { + var v uint64 + if value != "-" { + var err error + v, err = strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + } + *field = v + return nil +} + +func (ds *Dataset) parseLine(line []string) error { + var err error + + if len(line) != len(dsPropList) { + return errors.New("Output does not match what is expected on this platform") + } + setString(&ds.Name, line[0]) + setString(&ds.Origin, line[1]) + + if err = setUint(&ds.Used, line[2]); err != nil { + return err + } + if err = setUint(&ds.Avail, line[3]); err != nil { + return err + } + + setString(&ds.Mountpoint, line[4]) + setString(&ds.Compression, line[5]) + setString(&ds.Type, line[6]) + + if err = setUint(&ds.Volsize, line[7]); err != nil { + return err + } + if err = setUint(&ds.Quota, line[8]); err != nil { + return err + } + + if runtime.GOOS == "solaris" { + return nil + } + + if err = setUint(&ds.Written, line[9]); err != nil { + return err + } + if err = setUint(&ds.Logicalused, line[10]); err != nil { + return err + } + if err = setUint(&ds.Usedbydataset, line[11]); err != nil { + return err + } + return nil +} + +/* + * from zfs diff`s escape function: + * + * Prints a file name out a character at a time. If the character is + * not in the range of what we consider "printable" ASCII, display it + * as an escaped 3-digit octal value. ASCII values less than a space + * are all control characters and we declare the upper end as the + * DELete character. This also is the last 7-bit ASCII character. + * We choose to treat all 8-bit ASCII as not printable for this + * application. 
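+ * For example, a newline embedded in a file name is printed as the escape + * sequence \012, which unescapeFilepath below converts back into a single + * byte.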
+ */ +func unescapeFilepath(path string) (string, error) { + buf := make([]byte, 0, len(path)) + llen := len(path) + for i := 0; i < llen; { + if path[i] == '\\' { + if llen < i+4 { + return "", fmt.Errorf("Invalid octal code: too short") + } + octalCode := path[(i + 1):(i + 4)] + val, err := strconv.ParseUint(octalCode, 8, 8) + if err != nil { + return "", fmt.Errorf("Invalid octal code: %v", err) + } + buf = append(buf, byte(val)) + i += 4 + } else { + buf = append(buf, path[i]) + i++ + } + } + return string(buf), nil +} + +var changeTypeMap = map[string]ChangeType{ + "-": Removed, + "+": Created, + "M": Modified, + "R": Renamed, +} +var inodeTypeMap = map[string]InodeType{ + "B": BlockDevice, + "C": CharacterDevice, + "/": Directory, + ">": Door, + "|": NamedPipe, + "@": SymbolicLink, + "P": EventPort, + "=": Socket, + "F": File, +} + +// matches (+1) or (-1) +var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)") + +func parseReferenceCount(field string) (int, error) { + matches := referenceCountRegex.FindStringSubmatch(field) + if matches == nil { + return 0, fmt.Errorf("Regexp does not match") + } + return strconv.Atoi(matches[1]) +} + +func parseInodeChange(line []string) (*InodeChange, error) { + llen := len(line) + if llen < 1 { + return nil, fmt.Errorf("Empty line passed") + } + + changeType := changeTypeMap[line[0]] + if changeType == 0 { + return nil, fmt.Errorf("Unknown change type '%s'", line[0]) + } + + switch changeType { + case Renamed: + if llen != 4 { + return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen) + } + case Modified: + if llen != 4 && llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen) + } + default: + if llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen) + } + } + + inodeType := inodeTypeMap[line[1]] + if inodeType == 0 { + return nil, fmt.Errorf("Unknown inode type '%s'", line[1]) + } + + path, err := unescapeFilepath(line[2]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + + var newPath string + var referenceCount int + switch changeType { + case Renamed: + newPath, err = unescapeFilepath(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + case Modified: + if llen == 4 { + referenceCount, err = parseReferenceCount(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse reference count: %v", err) + } + } + default: + newPath = "" + } + + return &InodeChange{ + Change: changeType, + Type: inodeType, + Path: path, + NewPath: newPath, + ReferenceCountChange: referenceCount, + }, nil +} + +// example input +//M / /testpool/bar/ +//+ F /testpool/bar/hello.txt +//M / /testpool/bar/hello.txt (+1) +//M / /testpool/bar/hello-hardlink +func parseInodeChanges(lines [][]string) ([]*InodeChange, error) { + changes := make([]*InodeChange, len(lines)) + + for i, line := range lines { + c, err := parseInodeChange(line) + if err != nil { + return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line) + } + changes[i] = c + } + return changes, nil +} + +func listByType(t, filter string) ([]*Dataset, error) { + args := []string{"list", "-rHp", "-t", t, "-o", dsPropListOptions} + + if filter != "" { + args = append(args, filter) + } + out, err := zfs(args...) 
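+ // Each row of output holds the requested properties for one dataset; the + // loop below starts a new Dataset whenever the name column changes and + // folds each row in via parseLine.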
+ if err != nil { + return nil, err + } + + var datasets []*Dataset + + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return datasets, nil +} + +func propsSlice(properties map[string]string) []string { + args := make([]string, 0, len(properties)*3) + for k, v := range properties { + args = append(args, "-o") + args = append(args, fmt.Sprintf("%s=%s", k, v)) + } + return args +} + +func (z *Zpool) parseLine(line []string) error { + prop := line[1] + val := line[2] + + var err error + + switch prop { + case "name": + setString(&z.Name, val) + case "health": + setString(&z.Health, val) + case "allocated": + err = setUint(&z.Allocated, val) + case "size": + err = setUint(&z.Size, val) + case "free": + err = setUint(&z.Free, val) + case "fragmentation": + // Trim trailing "%" before parsing uint + err = setUint(&z.Fragmentation, val[:len(val)-1]) + case "readonly": + z.ReadOnly = val == "on" + case "freeing": + err = setUint(&z.Freeing, val) + case "leaked": + err = setUint(&z.Leaked, val) + case "dedupratio": + // Trim trailing "x" before parsing float64 + z.DedupRatio, err = strconv.ParseFloat(val[:len(val)-1], 64) + } + return err +} diff --git a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go new file mode 100644 index 00000000..b2b0cd96 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go @@ -0,0 +1,17 @@ +// +build !solaris + +package zfs + +import ( + "strings" +) + +// List of ZFS properties to retrieve from zfs list command on a non-Solaris platform +var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "written", "logicalused", "usedbydataset"} + +var dsPropListOptions = strings.Join(dsPropList, ",") + +// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform +var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"} +var zpoolPropListOptions = strings.Join(zpoolPropList, ",") +var zpoolArgs = []string{"get", "-p", zpoolPropListOptions} diff --git a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/utils_solaris.go new file mode 100644 index 00000000..4b0a9e92 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/utils_solaris.go @@ -0,0 +1,17 @@ +// +build solaris + +package zfs + +import ( + "strings" +) + +// List of ZFS properties to retrieve from zfs list command on a Solaris platform +var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota"} + +var dsPropListOptions = strings.Join(dsPropList, ",") + +// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform +var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"} +var zpoolPropListOptions = strings.Join(zpoolPropList, ",") +var zpoolArgs = []string{"get", "-p", zpoolPropListOptions} diff --git a/vendor/github.com/mistifyio/go-zfs/zfs.go b/vendor/github.com/mistifyio/go-zfs/zfs.go new file mode 100644 index 00000000..615337d0 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/zfs.go @@ -0,0 +1,451 @@ +// Package zfs provides wrappers around the ZFS command line tools. 
+package zfs + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// ZFS dataset types, which can indicate if a dataset is a filesystem, +// snapshot, or volume. +const ( + DatasetFilesystem = "filesystem" + DatasetSnapshot = "snapshot" + DatasetVolume = "volume" +) + +// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, +// or volume. The Type struct member can be used to determine a dataset's type. +// +// The field definitions can be found in the ZFS manual: +// http://www.freebsd.org/cgi/man.cgi?zfs(8). +type Dataset struct { + Name string + Origin string + Used uint64 + Avail uint64 + Mountpoint string + Compression string + Type string + Written uint64 + Volsize uint64 + Logicalused uint64 + Usedbydataset uint64 + Quota uint64 +} + +// InodeType is the type of inode as reported by Diff +type InodeType int + +// Types of Inodes +const ( + _ = iota // 0 == unknown type + BlockDevice InodeType = iota + CharacterDevice + Directory + Door + NamedPipe + SymbolicLink + EventPort + Socket + File +) + +// ChangeType is the type of inode change as reported by Diff +type ChangeType int + +// Types of Changes +const ( + _ = iota // 0 == unknown type + Removed ChangeType = iota + Created + Modified + Renamed +) + +// DestroyFlag is the options flag passed to Destroy +type DestroyFlag int + +// Valid destroy options +const ( + DestroyDefault DestroyFlag = 1 << iota + DestroyRecursive = 1 << iota + DestroyRecursiveClones = 1 << iota + DestroyDeferDeletion = 1 << iota + DestroyForceUmount = 1 << iota +) + +// InodeChange represents a change as reported by Diff +type InodeChange struct { + Change ChangeType + Type InodeType + Path string + NewPath string + ReferenceCountChange int +} + +// Logger can be used to log commands/actions +type Logger interface { + Log(cmd []string) +} + +type defaultLogger struct{} + +func (*defaultLogger) Log(cmd []string) { + return +} + +var logger Logger = &defaultLogger{} + +// SetLogger set a log handler to log all commands including arguments before +// they are executed +func SetLogger(l Logger) { + if l != nil { + logger = l + } +} + +// zfs is a helper function to wrap typical calls to zfs. +func zfs(arg ...string) ([][]string, error) { + c := command{Command: "zfs"} + return c.Run(arg...) +} + +// Datasets returns a slice of ZFS datasets, regardless of type. +// A filter argument may be passed to select a dataset with the matching name, +// or empty string ("") may be used to select all datasets. +func Datasets(filter string) ([]*Dataset, error) { + return listByType("all", filter) +} + +// Snapshots returns a slice of ZFS snapshots. +// A filter argument may be passed to select a snapshot with the matching name, +// or empty string ("") may be used to select all snapshots. +func Snapshots(filter string) ([]*Dataset, error) { + return listByType(DatasetSnapshot, filter) +} + +// Filesystems returns a slice of ZFS filesystems. +// A filter argument may be passed to select a filesystem with the matching name, +// or empty string ("") may be used to select all filesystems. +func Filesystems(filter string) ([]*Dataset, error) { + return listByType(DatasetFilesystem, filter) +} + +// Volumes returns a slice of ZFS volumes. +// A filter argument may be passed to select a volume with the matching name, +// or empty string ("") may be used to select all volumes. +func Volumes(filter string) ([]*Dataset, error) { + return listByType(DatasetVolume, filter) +} + +// GetDataset retrieves a single ZFS dataset by name. 
This dataset could be +// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. +func GetDataset(name string) (*Dataset, error) { + out, err := zfs("list", "-Hp", "-o", dsPropListOptions, name) + if err != nil { + return nil, err + } + + ds := &Dataset{Name: name} + for _, line := range out { + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return ds, nil +} + +// Clone clones a ZFS snapshot and returns a clone dataset. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) { + if d.Type != DatasetSnapshot { + return nil, errors.New("can only clone snapshots") + } + args := make([]string, 2, 4) + args[0] = "clone" + args[1] = "-p" + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, []string{d.Name, dest}...) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(dest) +} + +// Unmount unmounts currently mounted ZFS file systems. +func (d *Dataset) Unmount(force bool) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot unmount snapshots") + } + args := make([]string, 1, 3) + args[0] = "umount" + if force { + args = append(args, "-f") + } + args = append(args, d.Name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// Mount mounts ZFS file systems. +func (d *Dataset) Mount(overlay bool, options []string) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot mount snapshots") + } + args := make([]string, 1, 5) + args[0] = "mount" + if overlay { + args = append(args, "-O") + } + if options != nil { + args = append(args, "-o") + args = append(args, strings.Join(options, ",")) + } + args = append(args, d.Name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a +// new snapshot with the specified name, and streams the input data into the +// newly-created snapshot. +func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) { + c := command{Command: "zfs", Stdin: input} + _, err := c.Run("receive", name) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) SendSnapshot(output io.Writer) error { + if d.Type != DatasetSnapshot { + return errors.New("can only send snapshots") + } + + c := command{Command: "zfs", Stdout: output} + _, err := c.Run("send", d.Name) + return err +} + +// CreateVolume creates a new ZFS volume with the specified name, size, and +// properties. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) { + args := make([]string, 4, 5) + args[0] = "create" + args[1] = "-p" + args[2] = "-V" + args[3] = strconv.FormatUint(size, 10) + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// Destroy destroys a ZFS dataset. 
If the destroy bit flag is set, any +// descendents of the dataset will be recursively destroyed, including snapshots. +// If the deferred bit flag is set, the snapshot is marked for deferred +// deletion. +func (d *Dataset) Destroy(flags DestroyFlag) error { + args := make([]string, 1, 3) + args[0] = "destroy" + if flags&DestroyRecursive != 0 { + args = append(args, "-r") + } + + if flags&DestroyRecursiveClones != 0 { + args = append(args, "-R") + } + + if flags&DestroyDeferDeletion != 0 { + args = append(args, "-d") + } + + if flags&DestroyForceUmount != 0 { + args = append(args, "-f") + } + + args = append(args, d.Name) + _, err := zfs(args...) + return err +} + +// SetProperty sets a ZFS property on the receiving dataset. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func (d *Dataset) SetProperty(key, val string) error { + prop := strings.Join([]string{key, val}, "=") + _, err := zfs("set", prop, d.Name) + return err +} + +// GetProperty returns the current value of a ZFS property from the +// receiving dataset. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func (d *Dataset) GetProperty(key string) (string, error) { + out, err := zfs("get", key, d.Name) + if err != nil { + return "", err + } + + return out[0][2], nil +} + +// Rename renames a dataset. +func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshots bool) (*Dataset, error) { + args := make([]string, 3, 5) + args[0] = "rename" + args[1] = d.Name + args[2] = name + if createParent { + args = append(args, "-p") + } + if recursiveRenameSnapshots { + args = append(args, "-r") + } + _, err := zfs(args...) + if err != nil { + return d, err + } + + return GetDataset(name) +} + +// Snapshots returns a slice of all ZFS snapshots of a given dataset. +func (d *Dataset) Snapshots() ([]*Dataset, error) { + return Snapshots(d.Name) +} + +// CreateFilesystem creates a new ZFS filesystem with the specified name and +// properties. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "create" + + if properties != nil { + args = append(args, propsSlice(properties)...) + } + + args = append(args, name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// Snapshot creates a new ZFS snapshot of the receiving dataset, using the +// specified name. Optionally, the snapshot can be taken recursively, creating +// snapshots of all descendent filesystems in a single, atomic operation. +func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "snapshot" + if recursive { + args = append(args, "-r") + } + snapName := fmt.Sprintf("%s@%s", d.Name, name) + args = append(args, snapName) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(snapName) +} + +// Rollback rolls back the receiving ZFS dataset to a previous snapshot. +// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot +// rollback cannot be completed without this option, if more recent +// snapshots exist. +// An error will be returned if the input dataset is not of snapshot type. 
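+// The receiver must itself be the snapshot; the filesystem or volume it +// belongs to is what gets reverted.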
+func (d *Dataset) Rollback(destroyMoreRecent bool) error { + if d.Type != DatasetSnapshot { + return errors.New("can only rollback snapshots") + } + + args := make([]string, 1, 3) + args[0] = "rollback" + if destroyMoreRecent { + args = append(args, "-r") + } + args = append(args, d.Name) + + _, err := zfs(args...) + return err +} + +// Children returns a slice of children of the receiving ZFS dataset. +// A recursion depth may be specified, or a depth of 0 allows unlimited +// recursion. +func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { + args := []string{"list"} + if depth > 0 { + args = append(args, "-d") + args = append(args, strconv.FormatUint(depth, 10)) + } else { + args = append(args, "-r") + } + args = append(args, "-t", "all", "-Hp", "-o", dsPropListOptions) + args = append(args, d.Name) + + out, err := zfs(args...) + if err != nil { + return nil, err + } + + var datasets []*Dataset + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + return datasets[1:], nil +} + +// Diff returns changes between a snapshot and the given ZFS dataset. +// The snapshot name must include the filesystem part as it is possible to +// compare clones with their origin snapshots. +func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) { + args := []string{"diff", "-FH", snapshot, d.Name}[:] + out, err := zfs(args...) + if err != nil { + return nil, err + } + inodeChanges, err := parseInodeChanges(out) + if err != nil { + return nil, err + } + return inodeChanges, nil +} diff --git a/vendor/github.com/mistifyio/go-zfs/zpool.go b/vendor/github.com/mistifyio/go-zfs/zpool.go new file mode 100644 index 00000000..d8db945d --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/zpool.go @@ -0,0 +1,112 @@ +package zfs + +// ZFS zpool states, which can indicate if a pool is online, offline, +// degraded, etc. More information regarding zpool states can be found here: +// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html. +const ( + ZpoolOnline = "ONLINE" + ZpoolDegraded = "DEGRADED" + ZpoolFaulted = "FAULTED" + ZpoolOffline = "OFFLINE" + ZpoolUnavail = "UNAVAIL" + ZpoolRemoved = "REMOVED" +) + +// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can +// contain many descendent datasets. +type Zpool struct { + Name string + Health string + Allocated uint64 + Size uint64 + Free uint64 + Fragmentation uint64 + ReadOnly bool + Freeing uint64 + Leaked uint64 + DedupRatio float64 +} + +// zpool is a helper function to wrap typical calls to zpool. +func zpool(arg ...string) ([][]string, error) { + c := command{Command: "zpool"} + return c.Run(arg...) +} + +// GetZpool retrieves a single ZFS zpool by name. +func GetZpool(name string) (*Zpool, error) { + args := zpoolArgs + args = append(args, name) + out, err := zpool(args...) + if err != nil { + return nil, err + } + + // there is no -H + out = out[1:] + + z := &Zpool{Name: name} + for _, line := range out { + if err := z.parseLine(line); err != nil { + return nil, err + } + } + + return z, nil +} + +// Datasets returns a slice of all ZFS datasets in a zpool. +func (z *Zpool) Datasets() ([]*Dataset, error) { + return Datasets(z.Name) +} + +// Snapshots returns a slice of all ZFS snapshots in a zpool. 
+func (z *Zpool) Snapshots() ([]*Dataset, error) { + return Snapshots(z.Name) +} + +// CreateZpool creates a new ZFS zpool with the specified name, properties, +// and optional arguments. +// A full list of available ZFS properties and command-line arguments may be +// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) { + cli := make([]string, 1, 4) + cli[0] = "create" + if properties != nil { + cli = append(cli, propsSlice(properties)...) + } + cli = append(cli, name) + cli = append(cli, args...) + _, err := zpool(cli...) + if err != nil { + return nil, err + } + + return &Zpool{Name: name}, nil +} + +// Destroy destroys a ZFS zpool by name. +func (z *Zpool) Destroy() error { + _, err := zpool("destroy", z.Name) + return err +} + +// ListZpools list all ZFS zpools accessible on the current system. +func ListZpools() ([]*Zpool, error) { + args := []string{"list", "-Ho", "name"} + out, err := zpool(args...) + if err != nil { + return nil, err + } + + var pools []*Zpool + + for _, line := range out { + z, err := GetZpool(line[0]) + if err != nil { + return nil, err + } + pools = append(pools, z) + } + return pools, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/label/label.go b/vendor/github.com/opencontainers/runc/libcontainer/label/label.go new file mode 100644 index 00000000..fddec463 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/label/label.go @@ -0,0 +1,84 @@ +// +build !selinux !linux + +package label + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. +func InitLabels(options []string) (string, string, error) { + return "", "", nil +} + +func GetROMountLabel() string { + return "" +} + +func GenLabels(options string) (string, string, error) { + return "", "", nil +} + +func FormatMountLabel(src string, mountLabel string) string { + return src +} + +func SetProcessLabel(processLabel string) error { + return nil +} + +func GetFileLabel(path string) (string, error) { + return "", nil +} + +func SetFileLabel(path string, fileLabel string) error { + return nil +} + +func SetFileCreateLabel(fileLabel string) error { + return nil +} + +func Relabel(path string, fileLabel string, shared bool) error { + return nil +} + +func GetPidLabel(pid int) (string, error) { + return "", nil +} + +func Init() { +} + +func ReserveLabel(label string) error { + return nil +} + +func UnreserveLabel(label string) error { + return nil +} + +// DupSecOpt takes a process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return nil +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return nil +} + +// Validate checks that the label does not include unexpected options +func Validate(label string) error { + return nil +} + +// RelabelNeeded checks whether the user requested a relabel +func RelabelNeeded(label string) bool { + return false +} + +// IsShared checks that the label includes a "shared" mark +func IsShared(label string) bool { + return false +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go b/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go new file mode 100644 index 
00000000..65bccdb8 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go @@ -0,0 +1,208 @@ +// +build selinux,linux + +package label + +import ( + "fmt" + "strings" + + "github.com/opencontainers/runc/libcontainer/selinux" +) + +// Valid Label Options +var validOptions = map[string]bool{ + "disable": true, + "type": true, + "user": true, + "role": true, + "level": true, +} + +var ErrIncompatibleLabel = fmt.Errorf("Bad SELinux option z and Z can not be used together") + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. The labels returned will include a random MCS String, that is +// guaranteed to be unique. +func InitLabels(options []string) (string, string, error) { + if !selinux.SelinuxEnabled() { + return "", "", nil + } + processLabel, mountLabel := selinux.GetLxcContexts() + if processLabel != "" { + pcon := selinux.NewContext(processLabel) + mcon := selinux.NewContext(mountLabel) + for _, opt := range options { + val := strings.SplitN(opt, "=", 2) + if val[0] != "label" { + continue + } + if len(val) < 2 { + return "", "", fmt.Errorf("bad label option %q, valid options 'disable' or \n'user, role, level, type' followed by ':' and a value", opt) + } + if val[1] == "disable" { + return "", "", nil + } + con := strings.SplitN(val[1], ":", 2) + if len(con) < 2 || !validOptions[con[0]] { + return "", "", fmt.Errorf("bad label option %q, valid options 'disable, user, role, level, type'", con[0]) + + } + pcon[con[0]] = con[1] + if con[0] == "level" || con[0] == "user" { + mcon[con[0]] = con[1] + } + } + processLabel = pcon.Get() + mountLabel = mcon.Get() + } + return processLabel, mountLabel, nil +} + +func GetROMountLabel() string { + return selinux.GetROFileLabel() +} + +// DEPRECATED: The GenLabels function is only to be used during the transition to the official API. +func GenLabels(options string) (string, string, error) { + return InitLabels(strings.Fields(options)) +} + +// FormatMountLabel returns a string to be used by the mount command. +// The format of this string will be used to alter the labeling of the mountpoint. +// The string returned is suitable to be used as the options field of the mount command. +// If you need to have additional mount point options, you can pass them in as +// the first parameter. Second parameter is the label that you wish to apply +// to all content in the mount point. +func FormatMountLabel(src, mountLabel string) string { + if mountLabel != "" { + switch src { + case "": + src = fmt.Sprintf("context=%q", mountLabel) + default: + src = fmt.Sprintf("%s,context=%q", src, mountLabel) + } + } + return src +} + +// SetProcessLabel takes a process label and tells the kernel to assign the +// label to the next program executed by the current process. +func SetProcessLabel(processLabel string) error { + if processLabel == "" { + return nil + } + return selinux.Setexeccon(processLabel) +} + +// GetProcessLabel returns the process label that the kernel will assign +// to the next program executed by the current process. If "" is returned +// this indicates that the default labeling will happen for the process. 
+func GetProcessLabel() (string, error) { + return selinux.Getexeccon() +} + +// GetFileLabel returns the label for specified path +func GetFileLabel(path string) (string, error) { + return selinux.Getfilecon(path) +} + +// SetFileLabel modifies the "path" label to the specified file label +func SetFileLabel(path string, fileLabel string) error { + if selinux.SelinuxEnabled() && fileLabel != "" { + return selinux.Setfilecon(path, fileLabel) + } + return nil +} + +// SetFileCreateLabel tells the kernel the label for all files to be created +func SetFileCreateLabel(fileLabel string) error { + if selinux.SelinuxEnabled() { + return selinux.Setfscreatecon(fileLabel) + } + return nil +} + +// Relabel changes the label of path to the filelabel string. +// It changes the MCS label to s0 if shared is true. +// This will allow all containers to share the content. +func Relabel(path string, fileLabel string, shared bool) error { + if !selinux.SelinuxEnabled() { + return nil + } + + if fileLabel == "" { + return nil + } + + exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true} + if exclude_paths[path] { + return fmt.Errorf("SELinux relabeling of %s is not allowed", path) + } + + if shared { + c := selinux.NewContext(fileLabel) + c["level"] = "s0" + fileLabel = c.Get() + } + if err := selinux.Chcon(path, fileLabel, true); err != nil { + return fmt.Errorf("SELinux relabeling of %s is not allowed: %q", path, err) + } + return nil +} + +// GetPidLabel will return the label of the process running with the specified pid +func GetPidLabel(pid int) (string, error) { + return selinux.Getpidcon(pid) +} + +// Init initialises the labeling system +func Init() { + selinux.SelinuxEnabled() +} + +// ReserveLabel will record the fact that the MCS label has already been used. +// This will prevent InitLabels from using the MCS label in a newly created +// container +func ReserveLabel(label string) error { + selinux.ReserveLabel(label) + return nil +} + +// UnreserveLabel will remove the reservation of the MCS label. 
+// This will allow InitLabels to use the MCS label in a newly created +// containers +func UnreserveLabel(label string) error { + selinux.FreeLxcContexts(label) + return nil +} + +// DupSecOpt takes a process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return selinux.DupSecOpt(src) +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return selinux.DisableSecOpt() +} + +// Validate checks that the label does not include unexpected options +func Validate(label string) error { + if strings.Contains(label, "z") && strings.Contains(label, "Z") { + return ErrIncompatibleLabel + } + return nil +} + +// RelabelNeeded checks whether the user requested a relabel +func RelabelNeeded(label string) bool { + return strings.Contains(label, "z") || strings.Contains(label, "Z") +} + +// IsShared checks that the label includes a "shared" mark +func IsShared(label string) bool { + return strings.Contains(label, "z") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go new file mode 100644 index 00000000..1afc52b4 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -0,0 +1,143 @@ +// +build linux + +package system + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "syscall" + "unsafe" +) + +// If arg2 is nonzero, set the "child subreaper" attribute of the +// calling process; if arg2 is zero, unset the attribute. When a +// process is marked as a child subreaper, all of the children +// that it creates, and their descendants, will be marked as +// having a subreaper. In effect, a subreaper fulfills the role +// of init(1) for its descendant processes. Upon termination of +// a process that is orphaned (i.e., its immediate parent has +// already terminated) and marked as having a subreaper, the +// nearest still living ancestor subreaper will receive a SIGCHLD +// signal and be able to wait(2) on the process to discover its +// termination status. 
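The subreaper behaviour described in the comment above is what SetSubreaper (defined later in this file) wraps; a minimal sketch of the intended use, assuming a Linux 3.4+ kernel:

```go
// Hypothetical sketch: mark the current process as a child subreaper so that
// orphaned descendants are reparented to it rather than to init(1).
package main

import "github.com/opencontainers/runc/libcontainer/system"

func main() {
	// Opt in to child-subreaper behaviour (requires Linux >= 3.4).
	if err := system.SetSubreaper(1); err != nil {
		panic(err)
	}
	// From here on, wait(2) in this process will also reap orphaned
	// grandchildren, mirroring what init normally does.
}
```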
+const PR_SET_CHILD_SUBREAPER = 36 + +type ParentDeathSignal int + +func (p ParentDeathSignal) Restore() error { + if p == 0 { + return nil + } + current, err := GetParentDeathSignal() + if err != nil { + return err + } + if p == current { + return nil + } + return p.Set() +} + +func (p ParentDeathSignal) Set() error { + return SetParentDeathSignal(uintptr(p)) +} + +func Execv(cmd string, args []string, env []string) error { + name, err := exec.LookPath(cmd) + if err != nil { + return err + } + + return syscall.Exec(name, args, env) +} + +func Prlimit(pid, resource int, limit syscall.Rlimit) error { + _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) + if err != 0 { + return err + } + return nil +} + +func SetParentDeathSignal(sig uintptr) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { + return err + } + return nil +} + +func GetParentDeathSignal() (ParentDeathSignal, error) { + var sig int + _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) + if err != 0 { + return -1, err + } + return ParentDeathSignal(sig), nil +} + +func SetKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { + return err + } + + return nil +} + +func ClearKeepCaps() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { + return err + } + + return nil +} + +func Setctty() error { + if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { + return err + } + return nil +} + +// RunningInUserNS detects whether we are currently running in a user namespace. +// Copied from github.com/lxc/lxd/shared/util.go +func RunningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. 
+ */ + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) +} + +func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { + _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go new file mode 100644 index 00000000..a0e96371 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go @@ -0,0 +1,43 @@ +package system + +import ( + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +// look in /proc to find the process start time so that we can verify +// that this pid has started after ourself +func GetProcessStartTime(pid int) (string, error) { + data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return "", err + } + return parseStartTime(string(data)) +} + +func parseStartTime(stat string) (string, error) { + // the starttime is located at pos 22 + // from the man page + // + // starttime %llu (was %lu before Linux 2.6) + // (22) The time the process started after system boot. In kernels before Linux 2.6, this + // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks + // (divide by sysconf(_SC_CLK_TCK)). + // + // NOTE: + // pos 2 could contain space and is inside `(` and `)`: + // (2) comm %s + // The filename of the executable, in parentheses. + // This is visible whether or not the executable is + // swapped out. 
+	//
+	// the following is an example:
+	// 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+
+	// get parts after last `)`:
+	s := strings.Split(stat, ")")
+	parts := strings.Split(strings.TrimSpace(s[len(s)-1]), " ")
+	return parts[22-3], nil // starts at 3 (after the filename pos `2`)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
new file mode 100644
index 00000000..615ff4c8
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
@@ -0,0 +1,40 @@
+package system
+
+import (
+	"fmt"
+	"runtime"
+	"syscall"
+)
+
+// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
+//
+// We need different setns values for the different platforms and architectures.
+// We are declaring the macro here because the SETNS syscall does not exist in the stdlib.
+var setNsMap = map[string]uintptr{
+	"linux/386":     346,
+	"linux/arm64":   268,
+	"linux/amd64":   308,
+	"linux/arm":     375,
+	"linux/ppc":     350,
+	"linux/ppc64":   350,
+	"linux/ppc64le": 350,
+	"linux/s390x":   339,
+}
+
+var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+
+func SysSetns() uint32 {
+	return uint32(sysSetns)
+}
+
+func Setns(fd uintptr, flags uintptr) error {
+	ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+	if !exists {
+		return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+	}
+	_, _, err := syscall.RawSyscall(ns, fd, flags, 0)
+	if err != 0 {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
new file mode 100644
index 00000000..bb44d895
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
@@ -0,0 +1,25 @@
+// +build linux,386
+
+package system
+
+import (
+	"syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
new file mode 100644
index 00000000..0816bf82
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
@@ -0,0 +1,25 @@
+// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
+
+package system
+
+import (
+	"syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
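A sketch of the PID-reuse check that the proc.go helpers above enable: record a process's start time, then verify later that the PID still refers to the same process. The helper name and the use of PID 1 are illustrative assumptions, not part of the vendored API:

```go
// Illustrative check against PID reuse, built on GetProcessStartTime above.
// Linux-only, since it reads /proc/<pid>/stat.
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/system"
)

// samePid (hypothetical helper) reports whether pid still refers to the
// process observed earlier; equal start times rule out PID reuse.
func samePid(pid int, startedAt string) (bool, error) {
	now, err := system.GetProcessStartTime(pid)
	if err != nil {
		return false, err
	}
	return now == startedAt, nil
}

func main() {
	started, err := system.GetProcessStartTime(1) // PID 1 chosen for illustration
	if err != nil {
		panic(err)
	}
	ok, err := samePid(1, started)
	fmt.Println("start time:", started, "same process:", ok, err)
}
```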
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
new file mode 100644
index 00000000..3f780f31
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
@@ -0,0 +1,25 @@
+// +build linux,arm
+
+package system
+
+import (
+	"syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
new file mode 100644
index 00000000..b3a07cba
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
@@ -0,0 +1,12 @@
+// +build cgo,linux cgo,freebsd
+
+package system
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+func GetClockTicks() int {
+	return int(C.sysconf(C._SC_CLK_TCK))
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
new file mode 100644
index 00000000..d93b5d5f
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
@@ -0,0 +1,15 @@
+// +build !cgo windows
+
+package system
+
+func GetClockTicks() int {
+	// TODO figure out a better alternative for platforms where we're missing cgo
+	//
+	// TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
+	//
+	// An example of its usage can be found here.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx + + return 100 +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go new file mode 100644 index 00000000..e7cfd62b --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package system + +// RunningInUserNS is a stub for non-Linux systems +// Always returns false +func RunningInUserNS() bool { + return false +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go new file mode 100644 index 00000000..30f74dfb --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go @@ -0,0 +1,99 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var _zero uintptr + +// Returns the size of xattrs and nil error +// Requires path, takes allocated []byte or nil as last argument +func Llistxattr(path string, dest []byte) (size int, err error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return -1, err + } + var newpathBytes unsafe.Pointer + if len(dest) > 0 { + newpathBytes = unsafe.Pointer(&dest[0]) + } else { + newpathBytes = unsafe.Pointer(&_zero) + } + + _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0) + size = int(_size) + if errno != 0 { + return -1, errno + } + + return size, nil +} + +// Returns a []byte slice if the xattr is set and nil otherwise +// Requires path and its attribute as arguments +func Lgetxattr(path string, attr string) ([]byte, error) { + var sz int + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + // Start with a 128 length byte array + sz = 128 + dest := make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + + switch { + case errno == syscall.ENODATA: + return nil, errno + case errno == syscall.ENOTSUP: + return nil, errno + case errno == syscall.ERANGE: + // 128 byte array might just not be good enough, + // A dummy buffer is used ``uintptr(0)`` to get real size + // of the xattrs on disk + _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0) + sz = int(_sz) + if sz < 0 { + return nil, errno + } + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno != 0 { + return nil, errno + } + case errno != 0: + return nil, errno + } + sz = int(_sz) + return dest[:sz], nil +} + +func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + 
dataBytes = unsafe.Pointer(&_zero)
+	}
+	_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml
new file mode 100644
index 00000000..a6a98db8
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+
+go:
+  - 1.4.3
+  - 1.5.3
+  - release
+  - tip
+
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 00000000..b382a04e
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman
diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE
new file mode 100644
index 00000000..5dc68268
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md
new file mode 100644
index 00000000..f023d47c
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/README.md
@@ -0,0 +1,13 @@
+This project was automatically exported from code.google.com/p/go-uuid
+
+# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services.
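A brief, hypothetical usage sketch for the package: random (version 4) generation, parsing, and deterministic name-based (version 5) UUIDs, using only functions vendored in this PR:

```go
// Illustrative only; exercises New, Parse, Version, Variant, and NewSHA1.
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	// Random (version 4) UUID; New is shorthand for NewRandom().String().
	s := uuid.New()
	fmt.Println(s)

	// Parse returns nil on malformed input rather than an error.
	if u := uuid.Parse(s); u != nil {
		v, _ := u.Version()
		fmt.Println("version:", v, "variant:", u.Variant())
	}

	// Name-based (version 5) UUIDs are deterministic per (namespace, name).
	a := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.org"))
	b := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.org"))
	fmt.Println(a.String() == b.String()) // always true
}
```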
+ +###### Install +`go get github.com/pborman/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) + +Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: +http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go new file mode 100755 index 00000000..50a0f2d0 --- /dev/null +++ b/vendor/github.com/pborman/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. +func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go new file mode 100755 index 00000000..d8bd013e --- /dev/null +++ b/vendor/github.com/pborman/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go new file mode 100644 index 00000000..a0420c1e --- /dev/null +++ b/vendor/github.com/pborman/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go new file mode 100644 index 00000000..9dda1dfb --- /dev/null +++ b/vendor/github.com/pborman/uuid/json.go @@ -0,0 +1,34 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "errors" + +func (u UUID) MarshalJSON() ([]byte, error) { + if len(u) != 16 { + return []byte(`""`), nil + } + var js [38]byte + js[0] = '"' + encodeHex(js[1:], u) + js[37] = '"' + return js[:], nil +} + +func (u *UUID) UnmarshalJSON(data []byte) error { + if string(data) == `""` { + return nil + } + if data[0] != '"' { + return errors.New("invalid UUID format") + } + data = data[1 : len(data)-1] + uu := Parse(string(data)) + if uu == nil { + return errors.New("invalid UUID format") + } + *u = uu + return nil +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go new file mode 100755 index 00000000..42d60da8 --- /dev/null +++ b/vendor/github.com/pborman/uuid/node.go @@ -0,0 +1,117 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "net" + "sync" +) + +var ( + nodeMu sync.Mutex + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. 
+// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == nil { + setNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go new file mode 100644 index 00000000..c84f900d --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql.go @@ -0,0 +1,58 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. 
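A sketch of reading a UUID column through database/sql using the Scan implementation below. The driver name, connection string, and table are placeholder assumptions; no particular driver is implied by this PR:

```go
// Hypothetical sketch: scanning a UUID straight from a database row.
package main

import (
	"database/sql"
	"fmt"

	"github.com/pborman/uuid"
	// A real program would also import a driver for its side effects,
	// e.g. _ "github.com/lib/pq"; none is assumed here.
)

func loadID(db *sql.DB) (uuid.UUID, error) {
	var id uuid.UUID
	// Scan works for drivers that return the column as string or []byte.
	err := db.QueryRow(`SELECT id FROM widgets LIMIT 1`).Scan(&id)
	return id, err
}

func main() {
	db, err := sql.Open("postgres", "dbname=example sslmode=disable")
	if err != nil {
		panic(err) // fails here unless a "postgres" driver is registered
	}
	id, err := loadID(db)
	fmt.Println(id, err)
}
```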
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src.(type) {
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src.(string) == "" {
+			return nil
+		}
+
+		// see uuid.Parse for required string format
+		parsed := Parse(src.(string))
+
+		if parsed == nil {
+			return errors.New("Scan: invalid UUID format")
+		}
+
+		*uuid = parsed
+	case []byte:
+		b := src.([]byte)
+
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(b) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(b) == 16 {
+			*uuid = UUID(b)
+		} else {
+			u := Parse(string(b))
+
+			if u == nil {
+				return errors.New("Scan: invalid UUID format")
+			}
+
+			*uuid = u
+		}
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go
new file mode 100755
index 00000000..eedf2421
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
+)
+
+var (
+	timeMu    sync.Mutex
+	lasttime  uint64 // last time we returned
+	clock_seq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID (section 4.2.1.1).
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	old_seq := clock_seq
+	clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if old_seq != clock_seq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 00000000..fc8e052c
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
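A sketch of pulling the embedded timestamp and clock sequence back out of a version 1 UUID using the accessors defined above; illustrative usage only:

```go
// Sketch: decode the time and clock sequence fields of a version 1 UUID.
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewUUID() // version 1; nil if no node ID or time is available
	if u == nil {
		panic("NewUUID failed")
	}
	if t, ok := u.Time(); ok {
		sec, nsec := t.UnixTime()
		fmt.Println("created at unix:", sec, nsec)
	}
	if seq, ok := u.ClockSequence(); ok {
		fmt.Println("clock sequence:", seq)
	}
}
```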
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+	b1 := xvalues[x[0]]
+	b2 := xvalues[x[1]]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go
new file mode 100644
index 00000000..c4482cd8
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,176 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+	return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+	if len(s) == 36+9 {
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return nil
+		}
+		s = s[9:]
+	} else if len(s) != 36 {
+		return nil
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return nil
+	}
+	var uuid [16]byte
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		if v, ok := xtob(s[x:]); !ok {
+			return nil
+		} else {
+			uuid[i] = v
+		}
+	}
+	return uuid[:]
+}
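A sketch of the two accepted input forms and of comparisons against the NIL value; Equal is defined just below, and the constant UUID strings here are the well-known DNS namespace value, reused purely for illustration:

```go
// Sketch: Parse accepts both the bare and the "urn:uuid:" forms.
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	plain := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	urn := uuid.Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	fmt.Println(uuid.Equal(plain, urn))      // true: same 16 bytes
	fmt.Println(uuid.Equal(plain, uuid.NIL)) // false
	fmt.Println(plain.URN())                 // urn:uuid:6ba7b810-...
}
```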
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+	return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	if len(uuid) != 16 {
+		return ""
+	}
+	var buf [36]byte
+	encodeHex(buf[:], uuid)
+	return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	if len(uuid) != 16 {
+		return ""
+	}
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst[:], uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+	if len(uuid) != 16 {
+		return Invalid
+	}
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 00000000..0127eacf
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+	if nodeID == nil {
+		SetNodeInterface("")
+	}
+
+	now, seq, err := GetTime()
+	if err != nil {
+		return nil
+	}
+
+	uuid := make([]byte, 16)
+
+	time_low := uint32(now & 0xffffffff)
+	time_mid := uint16((now >> 32) & 0xffff)
+	time_hi := uint16((now >> 48) & 0x0fff)
+	time_hi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], time_low)
+	binary.BigEndian.PutUint16(uuid[4:], time_mid)
+	binary.BigEndian.PutUint16(uuid[6:], time_hi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID)
+
+	return uuid
+}
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 00000000..b3d4a368
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, that
+//  means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() UUID {
+	uuid := make([]byte, 16)
+	randomBits([]byte(uuid))
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
new file mode 100644
index 00000000..c31df062
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
@@ -0,0 +1,329 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
+//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar

+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"time"
+)
+
+const (
+	blockSize = 512
+
+	// Types
+	TypeReg           = '0'    // regular file
+	TypeRegA          = '\x00' // regular file
+	TypeLink          = '1'    // hard link
+	TypeSymlink       = '2'    // symbolic link
+	TypeChar          = '3'    // character device node
+	TypeBlock         = '4'    // block device node
+	TypeDir           = '5'    // directory
+	TypeFifo          = '6'    // fifo node
+	TypeCont          = '7'    // reserved
+	TypeXHeader       = 'x'    // extended header
+	TypeXGlobalHeader = 'g'    // global extended header
+	TypeGNULongName   = 'L'    // Next file has a long name
+	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
+	TypeGNUSparse     = 'S'    // sparse file
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + Xattrs map[string]string +} + +// File name constants from the tar spec. +const ( + fileNameSize = 100 // Maximum number of bytes in a standard tar name. + fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. +) + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. + m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. 
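A sketch of how a Header surfaces as an os.FileInfo, with tar mode bits translated into os.FileMode flags exactly as the Mode method above describes; the header values are made up for illustration:

```go
// Sketch: a Header's FileInfo view, including setuid-bit mapping.
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/vbatts/tar-split/archive/tar"
)

func main() {
	hdr := &tar.Header{
		Name:     "bin/tool",
		Mode:     0755 | 04000, // rwxr-xr-x plus setuid (c_ISUID)
		Size:     1024,
		ModTime:  time.Now(),
		Typeflag: tar.TypeReg,
	}
	fi := hdr.FileInfo()
	// Name() is the base name; the setuid bit becomes os.ModeSetuid.
	fmt.Println(fi.Name(), fi.Size(), fi.Mode().Perm(), fi.Mode()&os.ModeSetuid != 0)
}
```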
+const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. +func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. 
+ unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go new file mode 100644 index 00000000..4168ea2c --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -0,0 +1,943 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +const maxNanoSecondIntSize = 9 + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + hdrBuff [blockSize]byte // buffer to use in readHeader + + RawAccounting bool // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this. + rawBytes *bytes.Buffer // last raw bits +} + +// RawBytes accesses the raw bytes of the archive, apart from the file payload itself. +// This includes the header and padding. +// +// This call resets the current rawbytes buffer +// +// Only when RawAccounting is enabled, otherwise this returns nil +func (tr *Reader) RawBytes() []byte { + if !tr.RawAccounting { + return nil + } + if tr.rawBytes == nil { + tr.rawBytes = bytes.NewBuffer(nil) + } + // if we've read them, then flush them. + defer tr.rawBytes.Reset() + return tr.rawBytes.Bytes() +} + +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive. 
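A sketch of the raw-accounting feature this fork adds on top of the standard library reader: enable RawAccounting before reading, then collect the raw header bytes for each entry via RawBytes, as documented above. The input file name is a placeholder:

```go
// Sketch: walk an archive and report raw header bytes per entry.
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/vbatts/tar-split/archive/tar"
)

func main() {
	f, err := os.Open("example.tar") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	tr.RawAccounting = true
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		raw := tr.RawBytes() // raw bytes for this entry; the buffer then resets
		fmt.Printf("%s: %d payload bytes, %d raw header bytes\n",
			hdr.Name, hdr.Size, len(raw))
	}
}
```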
+type sparseFileReader struct { + rfr *regFileReader // reads the sparse-encoded file data + sp []sparseEntry // the sparse map for the file + pos int64 // keeps track of file position + tot int64 // total size of the file +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// Keywords for old GNU sparse headers +const ( + oldGNUSparseMainHeaderOffset = 386 + oldGNUSparseMainHeaderIsExtendedOffset = 482 + oldGNUSparseMainHeaderNumEntries = 4 + oldGNUSparseExtendedHeaderIsExtendedOffset = 504 + oldGNUSparseExtendedHeaderNumEntries = 21 + oldGNUSparseOffsetSize = 12 + oldGNUSparseNumBytesSize = 12 +) + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. +// +// io.EOF is returned at the end of the input. +func (tr *Reader) Next() (*Header, error) { + var hdr *Header + if tr.RawAccounting { + if tr.rawBytes == nil { + tr.rawBytes = bytes.NewBuffer(nil) + } else { + tr.rawBytes.Reset() + } + } + if tr.err == nil { + tr.skipUnread() + } + if tr.err != nil { + return hdr, tr.err + } + hdr = tr.readHeader() + if hdr == nil { + return hdr, tr.err + } + // Check for PAX/GNU header. + switch hdr.Typeflag { + case TypeXHeader: + // PAX extended header + headers, err := parsePAX(tr) + if err != nil { + return nil, err + } + // We actually read the whole file, + // but this skips alignment padding + tr.skipUnread() + if tr.err != nil { + return nil, tr.err + } + hdr = tr.readHeader() + if hdr == nil { + return nil, tr.err + } + mergePAX(hdr, headers) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } + return hdr, nil + case TypeGNULongName: + // We have a GNU long name header. Its contents are the real file name. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + var buf []byte + if tr.RawAccounting { + if _, err = tr.rawBytes.Write(realname); err != nil { + return nil, err + } + buf = make([]byte, tr.rawBytes.Len()) + copy(buf[:], tr.RawBytes()) + } + hdr, err := tr.Next() + // since the above call to Next() resets the buffer, we need to throw the bytes over + if tr.RawAccounting { + buf = append(buf, tr.RawBytes()...) + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + hdr.Name = cString(realname) + return hdr, err + case TypeGNULongLink: + // We have a GNU long link header. + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + var buf []byte + if tr.RawAccounting { + if _, err = tr.rawBytes.Write(realname); err != nil { + return nil, err + } + buf = make([]byte, tr.rawBytes.Len()) + copy(buf[:], tr.RawBytes()) + } + hdr, err := tr.Next() + // since the above call to Next() resets the buffer, we need to throw the bytes over + if tr.RawAccounting { + buf = append(buf, tr.RawBytes()...) 
+ if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + hdr.Linkname = cString(realname) + return hdr, err + } + return hdr, tr.err +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. +func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. 
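+// For example, "1350244992.023960108" parses to 1350244992 seconds and
+// 23960108 nanoseconds.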
+func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. + // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(buf) > 0 { + // or the header was empty to start with. + var sp int + // The size field ends at the first space. + sp = bytes.IndexByte(buf, ' ') + if sp == -1 { + return nil, ErrHeader + } + // Parse the first token as a decimal integer. + n, err := strconv.ParseInt(string(buf[:sp]), 10, 0) + if err != nil || n < 5 || int64(len(buf)) < n { + return nil, ErrHeader + } + // Extract everything between the decimal and the n -1 on the + // beginning to eat the ' ', -1 on the end to skip the newline. + var record []byte + record, buf = buf[sp+1:n-1], buf[n:] + // The first equals is guaranteed to mark the end of the key. + // Everything else is value. + eq := bytes.IndexByte(record, '=') + if eq == -1 { + return nil, ErrHeader + } + key, value := record[:eq], record[eq+1:] + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.Write(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. + headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() + } + return headers, nil +} + +// cString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func cString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +func (tr *Reader) octal(b []byte) int64 { + // Check for binary format first. 
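+	// A first byte with its high bit set marks the base-256 (big-endian
+	// binary) encoding, a GNU extension used when a value does not fit in
+	// the octal field.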
+ if len(b) > 0 && b[0]&0x80 != 0 { + var x int64 + for i, c := range b { + if i == 0 { + c &= 0x7f // ignore signal bit in first byte + } + x = x<<8 | int64(c) + } + return x + } + + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, err := strconv.ParseUint(cString(b), 8, 64) + if err != nil { + tr.err = err + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding. +func (tr *Reader) skipUnread() { + nr := tr.numBytes() + tr.pad // number of bytes to skip + tr.curr, tr.pad = nil, 0 + if tr.RawAccounting { + _, tr.err = io.CopyN(tr.rawBytes, tr.r, nr) + return + } + if sr, ok := tr.r.(io.Seeker); ok { + if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { + return + } + } + _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr) +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + given := tr.octal(header[148:156]) + unsigned, signed := checksum(header) + return given == unsigned || given == signed +} + +func (tr *Reader) readHeader() *Header { + header := tr.hdrBuff[:] + copy(header, zeroBlock) + + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + // because it could read some of the block, but reach EOF first + if tr.err == io.EOF && tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + return nil + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + + // Two blocks of zero bytes marks the end of the archive. + if bytes.Equal(header, zeroBlock[0:blockSize]) { + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + // because it could read some of the block, but reach EOF first + if tr.err == io.EOF && tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + return nil + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + hdr := new(Header) + s := slicer(header) + + hdr.Name = cString(s.next(100)) + hdr.Mode = tr.octal(s.next(8)) + hdr.Uid = int(tr.octal(s.next(8))) + hdr.Gid = int(tr.octal(s.next(8))) + hdr.Size = tr.octal(s.next(12)) + if hdr.Size < 0 { + tr.err = ErrHeader + return nil + } + hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = cString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. 
+ var format string + switch { + case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) + if string(header[508:512]) == "tar\x00" { + format = "star" + } else { + format = "posix" + } + case magic == "ustar \x00": // old GNU tar + format = "gnu" + } + + switch format { + case "posix", "gnu", "star": + hdr.Uname = cString(s.next(32)) + hdr.Gname = cString(s.next(32)) + devmajor := s.next(8) + devminor := s.next(8) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = tr.octal(devmajor) + hdr.Devminor = tr.octal(devminor) + } + var prefix string + switch format { + case "posix", "gnu": + prefix = cString(s.next(155)) + case "star": + prefix = cString(s.next(131)) + hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0) + hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + + if tr.err != nil { + tr.err = ErrHeader + return nil + } + + // Maximum value of hdr.Size is 64 GB (12 octal digits), + // so there's no risk of int64 overflowing. + nb := int64(hdr.Size) + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + + // Set the current file reader. + tr.curr = ®FileReader{r: tr.r, nb: nb} + + // Check for old GNU sparse format entry. + if hdr.Typeflag == TypeGNUSparse { + // Get the real size of the file. + hdr.Size = tr.octal(header[483:495]) + + // Read the sparse map. + sp := tr.readOldGNUSparseMap(header) + if tr.err != nil { + return nil + } + // Current file is a GNU sparse file. Update the current file reader. + tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + } + + return hdr +} + +// A sparseEntry holds a single entry in a sparse file's sparse map. +// A sparse entry indicates the offset and size in a sparse file of a +// block of data. +type sparseEntry struct { + offset int64 + numBytes int64 +} + +// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, +// then one or more extension headers are used to store the rest of the sparse map. +func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { + isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 + spCap := oldGNUSparseMainHeaderNumEntries + if isExtended { + spCap += oldGNUSparseExtendedHeaderNumEntries + } + sp := make([]sparseEntry, 0, spCap) + s := slicer(header[oldGNUSparseMainHeaderOffset:]) + + // Read the four entries from the main tar header + for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + for isExtended { + // There are more entries. Read an extension header and parse its entries. 
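+		// Each extension header is a single 512-byte block holding up to 21
+		// (offset, numBytes) pairs of 12 octal digits each; the byte at
+		// offset 504 flags whether yet another extension block follows.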
+ sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(sparseHeader); tr.err != nil { + return nil + } + } + + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := tr.octal(s.next(oldGNUSparseOffsetSize)) + numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize)) + if tr.err != nil { + tr.err = ErrHeader + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0. +// The sparse map is stored just before the file data and padded out to the nearest block boundary. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + buf := make([]byte, 2*blockSize) + sparseHeader := buf[:blockSize] + + // readDecimal is a helper function to read a decimal integer from the sparse map + // while making sure to read from the file in blocks of size blockSize + readDecimal := func() (int64, error) { + // Look for newline + nl := bytes.IndexByte(sparseHeader, '\n') + if nl == -1 { + if len(sparseHeader) >= blockSize { + // This is an error + return 0, ErrHeader + } + oldLen := len(sparseHeader) + newLen := oldLen + blockSize + if cap(sparseHeader) < newLen { + // There's more header, but we need to make room for the next block + copy(buf, sparseHeader) + sparseHeader = buf[:newLen] + } else { + // There's more header, and we can just reslice + sparseHeader = sparseHeader[:newLen] + } + + // Now that sparseHeader is large enough, read next block + if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil { + return 0, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err := tr.rawBytes.Write(sparseHeader[oldLen:newLen]); err != nil { + return 0, err + } + } + + // Look for a newline in the new data + nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n') + if nl == -1 { + // This is an error + return 0, ErrHeader + } + nl += oldLen // We want the position from the beginning + } + // Now that we've found a newline, read a number + n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0) + if err != nil { + return 0, ErrHeader + } + + // Update sparseHeader to consume this number + sparseHeader = sparseHeader[nl+1:] + return n, nil + } + + // Read the first block + if _, err := io.ReadFull(r, sparseHeader); err != nil { + return nil, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err := tr.rawBytes.Write(sparseHeader); err != nil { + return nil, err + } + } + + // The first line contains the number of entries + numEntries, err := readDecimal() + if err != nil { + return nil, err + } + + // Read all the entries + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + // Read the offset + offset, err := readDecimal() + if err != nil { + return nil, err + } + // Read numBytes + numBytes, err := readDecimal() + if err != nil { + return nil, err + } + + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1. 
+// The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) { + // Get number of entries + numEntriesStr, ok := headers[paxGNUSparseNumBlocks] + if !ok { + return nil, ErrHeader + } + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) + if err != nil { + return nil, ErrHeader + } + + sparseMap := strings.Split(headers[paxGNUSparseMap], ",") + + // There should be two numbers in sparseMap for each entry + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.curr == nil { + return 0, io.EOF + } + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// readHole reads a sparse file hole ending at offset toOffset +func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int { + n64 := toOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + if len(sfr.sp) == 0 { + // No more data fragments to read from. 
+ if sfr.pos < sfr.tot { + // We're in the last hole + n = sfr.readHole(b, sfr.tot) + return + } + // Otherwise, we're at the end of the file + return 0, io.EOF + } + if sfr.tot < sfr.sp[0].offset { + return 0, io.ErrUnexpectedEOF + } + if sfr.pos < sfr.sp[0].offset { + // We're in a hole + n = sfr.readHole(b, sfr.sp[0].offset) + return + } + + // We're not in a hole, so we'll read from the next data fragment + posInFragment := sfr.pos - sfr.sp[0].offset + bytesLeft := sfr.sp[0].numBytes - posInFragment + if int64(len(b)) > bytesLeft { + b = b[0:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + + if int64(n) == bytesLeft { + // We're done with this fragment + sfr.sp = sfr.sp[1:] + } + + if err == io.EOF && sfr.pos < sfr.tot { + // We reached the end of the last fragment's data, but there's a final hole + err = nil + } + return +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.nb +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go new file mode 100644 index 00000000..cf9cc79c --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go new file mode 100644 index 00000000..6f17dbe3 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go new file mode 100644 index 00000000..cb843db4 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? 
+	return nil
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
new file mode 100644
index 00000000..9dbc01a2
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
@@ -0,0 +1,396 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	ErrWriteTooLong    = errors.New("archive/tar: write too long")
+	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
+	ErrWriteAfterClose = errors.New("archive/tar: write after close")
+	errNameTooLong     = errors.New("archive/tar: name too long")
+	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
+)
+
+// A Writer provides sequential writing of a tar archive in POSIX.1 format.
+// A tar archive consists of a sequence of files.
+// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
+// writing at most hdr.Size bytes in total.
+type Writer struct {
+	w          io.Writer
+	err        error
+	nb         int64 // number of unwritten bytes for current file entry
+	pad        int64 // amount of padding to write after current file entry
+	closed     bool
+	usedBinary bool // whether the binary numeric field extension was used
+	preferPax  bool // use pax header instead of binary numeric header
+	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
+	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+	if tw.nb > 0 {
+		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+		return tw.err
+	}
+
+	n := tw.nb + tw.pad
+	for n > 0 && tw.err == nil {
+		nr := n
+		if nr > blockSize {
+			nr = blockSize
+		}
+		var nw int
+		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+		n -= int64(nw)
+	}
+	tw.nb = 0
+	tw.pad = 0
+	return tw.err
+}
+
+// cString writes s into b, terminating it with a NUL if there is room.
+// If the value is too long for the field and allowPax is true, a pax header
+// record is added instead.
+func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+	needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
+	if needsPaxHeader {
+		paxHeaders[paxKeyword] = s
+		return
+	}
+	if len(s) > len(b) {
+		if tw.err == nil {
+			tw.err = ErrFieldTooLong
+		}
+		return
+	}
+	ascii := toASCII(s)
+	copy(b, ascii)
+	if len(ascii) < len(b) {
+		b[len(ascii)] = 0
+	}
+}
+
+// octal encodes x as an octal ASCII string and writes it into b with leading zeros.
+func (tw *Writer) octal(b []byte, x int64) {
+	s := strconv.FormatInt(x, 8)
+	// leading zeros, but leave room for a NUL.
+	for len(s)+1 < len(b) {
+		s = "0" + s
+	}
+	tw.cString(b, s, false, paxNone, nil)
+}
+
+// numeric writes x into b, either as octal or as binary (GNUtar/star extension).
+// If the value is too long for the field and allowPax is true, a pax header
+// record is added instead.
+func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+	// Try octal first.
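+	// Octal is only used if the formatted value leaves room for a terminating
+	// NUL; otherwise fall back to a pax record or base-256 below.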
+	s := strconv.FormatInt(x, 8)
+	if len(s) < len(b) {
+		tw.octal(b, x)
+		return
+	}
+
+	// If it is too long for octal, and pax is preferred, use a pax header.
+	if allowPax && tw.preferPax {
+		tw.octal(b, 0)
+		s := strconv.FormatInt(x, 10)
+		paxHeaders[paxKeyword] = s
+		return
+	}
+
+	// Too big: use binary (big-endian).
+	tw.usedBinary = true
+	for i := len(b) - 1; x > 0 && i >= 0; i-- {
+		b[i] = byte(x)
+		x >>= 8
+	}
+	b[0] |= 0x80 // highest bit indicates binary format
+}
+
+var (
+	minTime = time.Unix(0, 0)
+	// There is room for 11 octal digits (33 bits) of mtime.
+	maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+	return tw.writeHeader(hdr, true)
+}
+
+// writeHeader writes hdr and prepares to accept the file's contents.
+// It calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// This method is called internally by writePAXHeader with allowPax set to
+// false, which suppresses writing a nested pax header.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+	if tw.closed {
+		return ErrWriteAfterClose
+	}
+	if tw.err == nil {
+		tw.Flush()
+	}
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// a map to hold pax header records, if any are needed
+	paxHeaders := make(map[string]string)
+
+	// TODO(shanemhansen): we might want to use PAX headers for
+	// subsecond time resolution, but for now let's just capture
+	// too long fields or non ascii characters
+
+	var header []byte
+
+	// We need to select which scratch buffer to use carefully,
+	// since this method is called recursively to write PAX headers.
+	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+	// already being used by the non-recursive call, so we must use paxHdrBuff.
+	header = tw.hdrBuff[:]
+	if !allowPax {
+		header = tw.paxHdrBuff[:]
+	}
+	copy(header, zeroBlock)
+	s := slicer(header)
+
+	// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
+	pathHeaderBytes := s.next(fileNameSize)
+
+	tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
+
+	// Handle out of range ModTime carefully.
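+	// The mtime field holds 11 octal digits (33 bits of seconds), so times
+	// outside [minTime, maxTime] cannot be represented and are encoded as zero.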
+ var modTime int64 + if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { + modTime = hdr.ModTime.Unix() + } + + tw.octal(s.next(8), hdr.Mode) // 100:108 + tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116 + tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124 + tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136 + tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297 + tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329 + tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337 + tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345 + + // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + prefixHeaderBytes := s.next(155) + tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions. + if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // try to use a ustar header when only the name is too long + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + suffix := hdr.Name + prefix := "" + if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) { + var err error + prefix, suffix, err = tw.splitUSTARLongName(hdr.Name) + if err == nil { + // ok we can use a ustar long name instead of pax, now correct the fields + + // remove the path field from the pax header. this will suppress the pax header + delete(paxHeaders, paxPath) + + // update the path fields + tw.cString(pathHeaderBytes, suffix, false, paxNone, nil) + tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil) + + // Use the ustar magic if we used ustar long names. + if len(prefix) > 0 && !tw.usedBinary { + copy(header[257:265], []byte("ustar\x00")) + } + } + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + tw.octal(header[148:155], chksum) + header[155] = ' ' + + if tw.err != nil { + // problem with header; probably integer too big for a field. + return tw.err + } + + if allowPax { + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +} + +// writeUSTARLongName splits a USTAR long name hdr.Name. +// name must be < 256 characters. errNameTooLong is returned +// if hdr.Name can't be split. The splitting heuristic +// is compatible with gnu tar. +func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) { + length := len(name) + if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + i := strings.LastIndex(name[:length], "/") + // nlen contains the resulting length in the name field. + // plen contains the resulting length in the prefix field. 
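+	// The split is only usable if both halves are non-empty and fit their
+	// respective fields.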
+	nlen := len(name) - i - 1
+	plen := i
+	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
+		err = errNameTooLong
+		return
+	}
+	prefix, suffix = name[:i], name[i+1:]
+	return
+}
+
+// writePAXHeader writes an extended pax header to the
+// archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+	// Prepare extended header
+	ext := new(Header)
+	ext.Typeflag = TypeXHeader
+	// Setting ModTime is required for reader parsing to
+	// succeed, and seems harmless enough.
+	ext.ModTime = hdr.ModTime
+	// The spec asks that we namespace our pseudo files
+	// with the current pid.
+	pid := os.Getpid()
+	dir, file := path.Split(hdr.Name)
+	fullName := path.Join(dir,
+		fmt.Sprintf("PaxHeaders.%d", pid), file)
+
+	ascii := toASCII(fullName)
+	if len(ascii) > 100 {
+		ascii = ascii[:100]
+	}
+	ext.Name = ascii
+	// Construct the body
+	var buf bytes.Buffer
+
+	for k, v := range paxHeaders {
+		fmt.Fprint(&buf, paxHeader(k+"="+v))
+	}
+
+	ext.Size = int64(len(buf.Bytes()))
+	if err := tw.writeHeader(ext, false); err != nil {
+		return err
+	}
+	if _, err := tw.Write(buf.Bytes()); err != nil {
+		return err
+	}
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// paxHeader formats a single pax record, prefixing it with the appropriate length
+func paxHeader(msg string) string {
+	const padding = 2 // Extra padding for space and newline
+	size := len(msg) + padding
+	size += len(strconv.Itoa(size))
+	record := fmt.Sprintf("%d %s\n", size, msg)
+	if len(record) != size {
+		// Final adjustment if adding size increased
+		// the number of digits in size
+		size = len(record)
+		record = fmt.Sprintf("%d %s\n", size, msg)
+	}
+	return record
+}
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+	if tw.closed {
+		err = ErrWriteAfterClose
+		return
+	}
+	overwrite := false
+	if int64(len(b)) > tw.nb {
+		b = b[0:tw.nb]
+		overwrite = true
+	}
+	n, err = tw.w.Write(b)
+	tw.nb -= int64(n)
+	if err == nil && overwrite {
+		err = ErrWriteTooLong
+		return
+	}
+	tw.err = err
+	return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+	if tw.err != nil || tw.closed {
+		return tw.err
+	}
+	tw.Flush()
+	tw.closed = true
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// trailer: two zero blocks
+	for i := 0; i < 2; i++ {
+		_, tw.err = tw.w.Write(zeroBlock)
+		if tw.err != nil {
+			break
+		}
+	}
+	return tw.err
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/README.md b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
new file mode 100644
index 00000000..2a3a5b56
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
@@ -0,0 +1,44 @@
+asm
+===
+
+This library is for assembly and disassembly of tar archives, facilitated by
+`github.com/vbatts/tar-split/tar/storage`.
+
+
+Concerns
+--------
+
+For completely safe assembly/disassembly, there will need to be a Content
+Addressable Storage (CAS) directory that maps to a checksum in the
+`storage.Entry` of `storage.FileType`.
+
+This is due to the fact that tar archives _can_ allow multiple records for the
+same path, but the last one effectively wins, even if the prior records had a
+different payload.
+
+In this way, when assembling an archive from relative paths, if the archive
+has multiple entries for the same path, all payloads read back from that
+relative path would be identical.
+
+
+Thoughts
+--------
+
+Have a look-aside directory or storage. When a clobbering record is
+encountered in the tar stream, the payload of the prior/existing file is
+stored to the CAS. The clobbering record's file payload can then be
+extracted, while we preserve the payload needed to reassemble a precise
+tar archive.
+
+clobbered/path/to/file.[0-N]
+
+*alternatively*
+
+We could just _not_ support tar streams that have clobbering file paths.
+Appending records to the archive is not incredibly common, and doesn't happen
+by default for most implementations. Not supporting them wouldn't be a
+security concern either: if clobbering did occur, we would reassemble an
+archive whose signature/checksum does not validate, so it shouldn't be
+trusted anyway.
+
+Otherwise, this will allow us to defer support for appended files as a FUTURE
+FEATURE.
+
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
new file mode 100644
index 00000000..d624450a
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
@@ -0,0 +1,130 @@
+package asm
+
+import (
+	"bytes"
+	"fmt"
+	"hash"
+	"hash/crc64"
+	"io"
+	"sync"
+
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
+// stream.
+//
+// It takes a storage.FileGetter, for mapping the file payloads that are to be
+// read in, and a storage.Unpacker, which has access to the raw bytes and file
+// order metadata. With the combination of these two items, a precise
+// assembled tar archive is possible.
+func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
+	// Since these are interfaces, nil values are possible; guard so we don't
+	// dereference a nil pointer.
+	if fg == nil || up == nil {
+		return nil
+	}
+	pr, pw := io.Pipe()
+	go func() {
+		err := WriteOutputTarStream(fg, up, pw)
+		if err != nil {
+			pw.CloseWithError(err)
+		} else {
+			pw.Close()
+		}
+	}()
+	return pr
+}
+
+// WriteOutputTarStream writes an assembled tar archive to a writer.
+func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error {
+	// Since these are interfaces, nil values are possible; guard so we don't
+	// dereference a nil pointer.
+	if fg == nil || up == nil {
+		return nil
+	}
+	var copyBuffer []byte
+	var crcHash hash.Hash
+	var crcSum []byte
+	var multiWriter io.Writer
+	for {
+		entry, err := up.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+		switch entry.Type {
+		case storage.SegmentType:
+			if _, err := w.Write(entry.Payload); err != nil {
+				return err
+			}
+		case storage.FileType:
+			if entry.Size == 0 {
+				continue
+			}
+			fh, err := fg.Get(entry.GetName())
+			if err != nil {
+				return err
+			}
+			if crcHash == nil {
+				crcHash = crc64.New(storage.CRCTable)
+				crcSum = make([]byte, 8)
+				multiWriter = io.MultiWriter(w, crcHash)
+				copyBuffer = byteBufferPool.Get().([]byte)
+				defer byteBufferPool.Put(copyBuffer)
+			} else {
+				crcHash.Reset()
+			}
+
+			if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil {
+				fh.Close()
+				return err
+			}
+
+			if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) {
+				// I would rather this be a comparable ErrInvalidChecksum or such,
+				// but since it's coming through the PipeReader, the context of
+				// _which_ file would be lost...
+				fh.Close()
+				return fmt.Errorf("file integrity checksum failed for %q", entry.GetName())
+			}
+			fh.Close()
+		}
+	}
+}
+
+var byteBufferPool = &sync.Pool{
+	New: func() interface{} {
+		return make([]byte, 32*1024)
+	},
+}
+
+// copyWithBuffer is taken from the stdlib io.Copy implementation
+// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367
+func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+	for {
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er == io.EOF {
+			break
+		}
+		if er != nil {
+			err = er
+			break
+		}
+	}
+	return written, err
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
new file mode 100644
index 00000000..54ef23ae
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -0,0 +1,141 @@
+package asm
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewInputTarStream wraps the Reader stream of a tar archive and provides a
+// Reader stream of the same.
+//
+// In the middle it will pack the segments and file metadata to storage.Packer
+// `p`.
+//
+// The storage.FilePutter is where payloads of files in the stream are
+// stashed. If this stashing is not needed, you can provide a nil
+// storage.FilePutter; since checksumming is still needed, a default
+// NewDiscardFilePutter will be used internally.
+func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
+	// What to do here... folks will want their own access to the Reader that is
+	// their tar archive stream, but we'll need that same stream to use our
+	// forked 'archive/tar'.
+	// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
+	// from, and we'll MITM the stream to store metadata.
+	// We'll need a storage.FilePutter too ...
+
+	// Another concern, whether to do any storage.FilePutter operations, such that we
+	// don't extract any amount of the archive. But then again, we're not making
+	// files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter.
+	// Perhaps we have a DiscardFilePutter that is a bit bucket.
+
+	// we'll return the pipe reader, since TeeReader does not buffer and will
+	// only read what the outputRdr Read's. Since Tar archives have padding on
+	// the end, we want to be the one reading the padding, even if the user's
+	// `archive/tar` doesn't care.
+	pR, pW := io.Pipe()
+	outputRdr := io.TeeReader(r, pW)
+
+	// we need a putter that will generate the crc64 sums of file payloads
+	if fp == nil {
+		fp = storage.NewDiscardFilePutter()
+	}
+
+	go func() {
+		tr := tar.NewReader(outputRdr)
+		tr.RawAccounting = true
+		for {
+			hdr, err := tr.Next()
+			if err != nil {
+				if err != io.EOF {
+					pW.CloseWithError(err)
+					return
+				}
+				// even when an EOF is reached, there are often 1024 null bytes on
+				// the end of an archive. Collect them too.
+				if b := tr.RawBytes(); len(b) > 0 {
+					_, err := p.AddEntry(storage.Entry{
+						Type:    storage.SegmentType,
+						Payload: b,
+					})
+					if err != nil {
+						pW.CloseWithError(err)
+						return
+					}
+				}
+				break // not return. We need the end of the reader.
+			}
+			if hdr == nil {
+				break // not return. We need the end of the reader.
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err := p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			var csum []byte
+			if hdr.Size > 0 {
+				var err error
+				_, csum, err = fp.Put(hdr.Name, tr)
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			entry := storage.Entry{
+				Type:    storage.FileType,
+				Size:    hdr.Size,
+				Payload: csum,
+			}
+			// For proper marshalling of non-utf8 characters
+			entry.SetName(hdr.Name)
+
+			// File entries added, regardless of size
+			_, err = p.AddEntry(entry)
+			if err != nil {
+				pW.CloseWithError(err)
+				return
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err = p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+		}
+
+		// it is allowable, and not uncommon, that there is further padding on
+		// the end of an archive, apart from the expected 1024 null bytes.
+		remainder, err := ioutil.ReadAll(outputRdr)
+		if err != nil && err != io.EOF {
+			pW.CloseWithError(err)
+			return
+		}
+		_, err = p.AddEntry(storage.Entry{
+			Type:    storage.SegmentType,
+			Payload: remainder,
+		})
+		if err != nil {
+			pW.CloseWithError(err)
+			return
+		}
+		pW.Close()
+	}()
+
+	return pR, nil
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/doc.go b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
new file mode 100644
index 00000000..4367b902
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
@@ -0,0 +1,9 @@
+/*
+Package asm provides the API for streaming assembly and disassembly of tar
+archives.
+
+It uses `github.com/vbatts/tar-split/tar/storage` for packing/unpacking the
+metadata of a stream, as well as an implementation of getting/putting the
+file entries' payloads.
+*/
+package asm
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/doc.go b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
new file mode 100644
index 00000000..83f7089f
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
@@ -0,0 +1,12 @@
+/*
+Package storage is for metadata of a tar archive.
+
+It packs and unpacks the Entries of the stream. An Entry is either a segment
+of raw bytes (the raw headers and various padding) or a marker for a file
+payload.
+
+The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
+the file payload marker includes the name of the file, its size, and a crc64
+checksum (for basic file integrity).
+*/
+package storage
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/entry.go b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
new file mode 100644
index 00000000..c91e7ea1
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
@@ -0,0 +1,78 @@
+package storage
+
+import "unicode/utf8"
+
+// Entries is for sorting by Position
+type Entries []Entry
+
+func (e Entries) Len() int           { return len(e) }
+func (e Entries) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
+
+// Type of Entry
+type Type int
+
+const (
+	// FileType represents a file payload from the tar stream.
+	//
+	// This will be used to map to relative paths on disk. Only entries with
+	// Size > 0 will get read into the resulting output stream (entries such
+	// as hardlinks have Size 0).
+	FileType Type = 1 + iota
+	// SegmentType represents a raw bytes segment from the archive stream.
+	// These raw byte segments consist of the raw headers and various padding.
+	//
+	// Its payload is to be marshalled base64 encoded.
+	SegmentType
+)
+
+// Entry is the structure for packing and unpacking the information read from
+// the Tar archive.
+//
+// FileType Payload checksum is using `hash/crc64` for basic file integrity,
+// _not_ for cryptography.
+// From http://www.backplane.com/matt/crc64.html, CRC32 has almost 40,000
+// collisions in a sample of 18.2 million, CRC64 had none.
+type Entry struct {
+	Type     Type   `json:"type"`
+	Name     string `json:"name,omitempty"`
+	NameRaw  []byte `json:"name_raw,omitempty"`
+	Size     int64  `json:"size,omitempty"`
+	Payload  []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
+	Position int    `json:"position"`
+}
+
+// SetName will check name for valid UTF-8 string, and set the appropriate
+// field. See https://github.com/vbatts/tar-split/issues/17
+func (e *Entry) SetName(name string) {
+	if utf8.ValidString(name) {
+		e.Name = name
+	} else {
+		e.NameRaw = []byte(name)
+	}
+}
+
+// SetNameBytes will check name for valid UTF-8 string, and set the appropriate
+// field
+func (e *Entry) SetNameBytes(name []byte) {
+	if utf8.Valid(name) {
+		e.Name = string(name)
+	} else {
+		e.NameRaw = name
+	}
+}
+
+// GetName returns the string for the entry's name, regardless of which field
+// it is stored in
+func (e *Entry) GetName() string {
+	if len(e.NameRaw) > 0 {
+		return string(e.NameRaw)
+	}
+	return e.Name
+}
+
+// GetNameBytes returns the bytes for the entry's name, regardless of which
+// field it is stored in
+func (e *Entry) GetNameBytes() []byte {
+	if len(e.NameRaw) > 0 {
+		return e.NameRaw
+	}
+	return []byte(e.Name)
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
new file mode 100644
index 00000000..ae11f8ff
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
@@ -0,0 +1,104 @@
+package storage
+
+import (
+	"bytes"
+	"errors"
+	"hash/crc64"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filename. Presumably, the names will be scoped to relative
+// file paths.
+type FileGetter interface { + // Get returns a stream for the provided file path + Get(filename string) (output io.ReadCloser, err error) +} + +// FilePutter is the interface for storing a stream of a file payload, +// addressed by name/filename. +type FilePutter interface { + // Put returns the size of the stream received, and the crc64 checksum for + // the provided stream + Put(filename string, input io.Reader) (size int64, checksum []byte, err error) +} + +// FileGetPutter is the interface that groups both Getting and Putting file +// payloads. +type FileGetPutter interface { + FileGetter + FilePutter +} + +// NewPathFileGetter returns a FileGetter that is for files relative to path +// relpath. +func NewPathFileGetter(relpath string) FileGetter { + return &pathFileGetter{root: relpath} +} + +type pathFileGetter struct { + root string +} + +func (pfg pathFileGetter) Get(filename string) (io.ReadCloser, error) { + return os.Open(filepath.Join(pfg.root, filename)) +} + +type bufferFileGetPutter struct { + files map[string][]byte +} + +func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) { + if _, ok := bfgp.files[name]; !ok { + return nil, errors.New("no such file") + } + b := bytes.NewBuffer(bfgp.files[name]) + return &readCloserWrapper{b}, nil +} + +func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) { + crc := crc64.New(CRCTable) + buf := bytes.NewBuffer(nil) + cw := io.MultiWriter(crc, buf) + i, err := io.Copy(cw, r) + if err != nil { + return 0, nil, err + } + bfgp.files[name] = buf.Bytes() + return i, crc.Sum(nil), nil +} + +type readCloserWrapper struct { + io.Reader +} + +func (w *readCloserWrapper) Close() error { return nil } + +// NewBufferFileGetPutter is a simple in-memory FileGetPutter +// +// Implication is this is memory intensive... +// Probably best for testing or light weight cases. 
+func NewBufferFileGetPutter() FileGetPutter {
+	return &bufferFileGetPutter{
+		files: map[string][]byte{},
+	}
+}
+
+// NewDiscardFilePutter is a bit bucket FilePutter
+func NewDiscardFilePutter() FilePutter {
+	return &bitBucketFilePutter{}
+}
+
+type bitBucketFilePutter struct {
+}
+
+func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
+	c := crc64.New(CRCTable)
+	i, err := io.Copy(c, r)
+	return i, c.Sum(nil), err
+}
+
+// CRCTable is the default table used for crc64 sum calculations
+var CRCTable = crc64.MakeTable(crc64.ISO)
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
new file mode 100644
index 00000000..aba69481
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
@@ -0,0 +1,127 @@
+package storage
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+	"path/filepath"
+	"unicode/utf8"
+)
+
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
+
+// Packer describes the methods to pack Entries to a storage destination
+type Packer interface {
+	// AddEntry packs the Entry and returns its position
+	AddEntry(e Entry) (int, error)
+}
+
+// Unpacker describes the methods to read Entries from a source
+type Unpacker interface {
+	// Next returns the next Entry being unpacked, or error, until io.EOF
+	Next() (*Entry, error)
+}
+
+/* TODO(vbatts) figure out a good model for this
+type PackUnpacker interface {
+	Packer
+	Unpacker
+}
+*/
+
+type jsonUnpacker struct {
+	seen seenNames
+	dec  *json.Decoder
+}
+
+func (jup *jsonUnpacker) Next() (*Entry, error) {
+	var e Entry
+	err := jup.dec.Decode(&e)
+	if err != nil {
+		return nil, err
+	}
+
+	// check for dup name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jup.seen[cName]; ok {
+			return nil, ErrDuplicatePath
+		}
+		jup.seen[cName] = struct{}{}
+	}
+
+	return &e, err
+}
+
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
+// FileType) as a json document.
+//
+// Each Entry read is expected to be delimited by a new line.
+func NewJSONUnpacker(r io.Reader) Unpacker {
+	return &jsonUnpacker{
+		dec:  json.NewDecoder(r),
+		seen: seenNames{},
+	}
+}
+
+type jsonPacker struct {
+	w    io.Writer
+	e    *json.Encoder
+	pos  int
+	seen seenNames
+}
+
+type seenNames map[string]struct{}
+
+func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
+	// if Name is not valid utf8, switch it to raw first.
+	if e.Name != "" {
+		if !utf8.ValidString(e.Name) {
+			e.NameRaw = []byte(e.Name)
+			e.Name = ""
+		}
+	}
+
+	// check early for dup name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jp.seen[cName]; ok {
+			return -1, ErrDuplicatePath
+		}
+		jp.seen[cName] = struct{}{}
+	}
+
+	e.Position = jp.pos
+	err := jp.e.Encode(e)
+	if err != nil {
+		return -1, err
+	}
+
+	// made it this far, increment now
+	jp.pos++
+	return e.Position, nil
+}
+
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
+// FileType) as a json document.
+//
+// The Entries are delimited by a new line.
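+//
+// For illustration, a packed FileType entry might look like this on the wire
+// (values are made up; the []byte crc64 payload is base64-encoded by
+// encoding/json):
+//
+//	{"type":1,"name":"./file.txt","size":1024,"payload":"esHKK3/MgLU=","position":2}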
+func NewJSONPacker(w io.Writer) Packer {
+	return &jsonPacker{
+		w:    w,
+		e:    json.NewEncoder(w),
+		seen: seenNames{},
+	}
+}
+
+/*
+TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgpack
+(https://github.com/ugorji/go)
+
+Since our jsonUnpacker and jsonPacker just take an io.Reader and an io.Writer,
+we can get away with passing them a gzip.Reader/gzip.Writer.
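+
+A minimal sketch of the full round trip, for orientation (tarStream and
+tarDest are hypothetical io.Reader/io.Writer values; only APIs from this
+package and tar/asm are used, and error handling is elided):
+
+	var meta bytes.Buffer
+	fgp := storage.NewBufferFileGetPutter()
+	rdr, _ := asm.NewInputTarStream(tarStream, storage.NewJSONPacker(&meta), fgp)
+	io.Copy(ioutil.Discard, rdr) // drain the tee so the packer sees the whole archive
+	out := asm.NewOutputTarStream(fgp, storage.NewJSONUnpacker(&meta))
+	io.Copy(tarDest, out) // reproduces the original archive, crc64-verified per file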