Mirror of https://github.com/containers/skopeo.git, synced 2025-06-28 15:47:34 +00:00
Merge pull request #265 from masters-of-cats/vendor-errorspkg

Add pkg/errors dependency

Commit: 686c3fcd7a
@@ -14,6 +14,7 @@ clone git github.com/go-check/check v1
 clone git github.com/stretchr/testify v1.1.3
 clone git github.com/davecgh/go-spew master
 clone git github.com/pmezard/go-difflib master
+clone git github.com/pkg/errors master
 # docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
 clone git github.com/docker/docker v1.12.1
 clone git github.com/docker/engine-api 4eca04ae18f4f93f40196a17b9aa6e11262a7269
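The rest of this pull request replaces fmt.Errorf calls in the vendored containers/image code with the helpers from the newly vendored github.com/pkg/errors package. A minimal sketch of what that buys (the helper name and path below are illustrative, not taken from this diff): errors.Wrap and errors.Wrapf keep the original error reachable as a cause and record a stack trace, whereas fmt.Errorf("...: %v", err) flattens everything into a plain string.

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// openConfig is a hypothetical helper, used only to contrast the two styles.
func openConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// Old style: the cause is flattened into the message text.
		//   return fmt.Errorf("Error opening config %s: %v", path, err)
		// New style: the cause stays attached and a stack trace is recorded.
		return errors.Wrapf(err, "Error opening config %s", path)
	}
	f.Close()
	return nil
}

func main() {
	err := openConfig("/nonexistent/config.json")
	fmt.Printf("%v\n", err)  // message chain only
	fmt.Printf("%+v\n", err) // message chain plus the recorded stack trace
	// The original *os.PathError is still reachable through errors.Cause.
	fmt.Println(os.IsNotExist(errors.Cause(err)))
}
```

errors.Errorf is used throughout where there is no underlying error to wrap; it behaves like fmt.Errorf but also records a stack trace at the point of creation.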
vendor/github.com/containers/image/copy/compression.go (generated, vendored): 3 changed lines

@@ -4,9 +4,10 @@ import (
 	"bytes"
 	"compress/bzip2"
 	"compress/gzip"
-	"errors"
 	"io"

+	"github.com/pkg/errors"
+
 	"github.com/Sirupsen/logrus"
 )
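Dropping the stdlib "errors" import works here because github.com/pkg/errors exposes an errors.New with the same signature, so existing call sites compile unchanged while the created errors additionally carry a stack trace. A tiny illustrative sketch (the variable name is not from compression.go):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors" // drop-in for the stdlib "errors" import
)

// errUnsupported is an illustrative sentinel, not taken from compression.go.
var errUnsupported = errors.New("unsupported compression format")

func main() {
	fmt.Printf("%v\n", errUnsupported)  // prints just the message, like stdlib errors.New
	fmt.Printf("%+v\n", errUnsupported) // additionally prints where the error was created
}
```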
vendor/github.com/containers/image/copy/copy.go (generated, vendored): 70 changed lines

@@ -3,7 +3,6 @@ package copy
 import (
 	"bytes"
 	"compress/gzip"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -18,6 +17,7 @@ import (
 	"github.com/containers/image/transports"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/digest"
+	"github.com/pkg/errors"
 )

 // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@@ -51,11 +51,11 @@ type imageCopier struct {
 // and set validationFailed to true if the source stream does not match expectedDigest.
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
 	if err := expectedDigest.Validate(); err != nil {
-		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
 	}
 	digestAlgorithm := expectedDigest.Algorithm()
 	if !digestAlgorithm.Available() {
-		return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
 	}
 	return &digestingReader{
 		source: source,
@@ -72,14 +72,14 @@ func (d *digestingReader) Read(p []byte) (int, error) {
 			// Coverage: This should not happen, the hash.Hash interface requires
 			// d.digest.Write to never return an error, and the io.Writer interface
 			// requires n2 == len(input) if no error is returned.
-			return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err)
+			return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
 		}
 	}
 	if err == io.EOF {
 		actualDigest := d.digester.Digest()
 		if actualDigest != d.expectedDigest {
 			d.validationFailed = true
-			return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
 		}
 	}
 	return n, err
@@ -106,14 +106,14 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe

 	dest, err := destRef.NewImageDestination(options.DestinationCtx)
 	if err != nil {
-		return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err)
+		return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
 	}
 	defer dest.Close()
 	destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes()

 	rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes)
 	if err != nil {
-		return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err)
+		return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
 	}
 	unparsedImage := image.UnparsedFromSource(rawSource)
 	defer func() {
@@ -124,17 +124,17 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe

 	// Please keep this policy check BEFORE reading any other information about the image.
 	if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
-		return fmt.Errorf("Source image rejected: %v", err)
+		return errors.Wrap(err, "Source image rejected")
 	}
 	src, err := image.FromUnparsedImage(unparsedImage)
 	if err != nil {
-		return fmt.Errorf("Error initializing image from source %s: %v", transports.ImageName(srcRef), err)
+		return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
 	}
 	unparsedImage = nil
 	defer src.Close()

 	if src.IsMultiImage() {
-		return fmt.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
+		return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
 	}

 	var sigs [][]byte
@@ -144,14 +144,14 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
 		writeReport("Getting image source signatures\n")
 		s, err := src.Signatures()
 		if err != nil {
-			return fmt.Errorf("Error reading signatures: %v", err)
+			return errors.Wrap(err, "Error reading signatures")
 		}
 		sigs = s
 	}
 	if len(sigs) != 0 {
 		writeReport("Checking if image destination supports signatures\n")
 		if err := dest.SupportsSignatures(); err != nil {
-			return fmt.Errorf("Can not copy signatures: %v", err)
+			return errors.Wrap(err, "Can not copy signatures")
 		}
 	}

@@ -182,17 +182,17 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
 	pendingImage := src
 	if !reflect.DeepEqual(manifestUpdates, types.ManifestUpdateOptions{InformationOnly: manifestUpdates.InformationOnly}) {
 		if !canModifyManifest {
-			return fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+			return errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
 		}
 		manifestUpdates.InformationOnly.Destination = dest
 		pendingImage, err = src.UpdatedImage(manifestUpdates)
 		if err != nil {
-			return fmt.Errorf("Error creating an updated image manifest: %v", err)
+			return errors.Wrap(err, "Error creating an updated image manifest")
 		}
 	}
 	manifest, _, err := pendingImage.Manifest()
 	if err != nil {
-		return fmt.Errorf("Error reading manifest: %v", err)
+		return errors.Wrap(err, "Error reading manifest")
 	}

 	if err := ic.copyConfig(pendingImage); err != nil {
@@ -202,33 +202,33 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
 	if options != nil && options.SignBy != "" {
 		mech, err := signature.NewGPGSigningMechanism()
 		if err != nil {
-			return fmt.Errorf("Error initializing GPG: %v", err)
+			return errors.Wrap(err, "Error initializing GPG")
 		}
 		dockerReference := dest.Reference().DockerReference()
 		if dockerReference == nil {
-			return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
+			return errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
 		}

 		writeReport("Signing manifest\n")
 		newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy)
 		if err != nil {
-			return fmt.Errorf("Error creating signature: %v", err)
+			return errors.Wrap(err, "Error creating signature")
 		}
 		sigs = append(sigs, newSig)
 	}

 	writeReport("Writing manifest to image destination\n")
 	if err := dest.PutManifest(manifest); err != nil {
-		return fmt.Errorf("Error writing manifest: %v", err)
+		return errors.Wrap(err, "Error writing manifest")
 	}

 	writeReport("Storing signatures\n")
 	if err := dest.PutSignatures(sigs); err != nil {
-		return fmt.Errorf("Error writing signatures: %v", err)
+		return errors.Wrap(err, "Error writing signatures")
 	}

 	if err := dest.Commit(); err != nil {
-		return fmt.Errorf("Error committing the finished image: %v", err)
+		return errors.Wrap(err, "Error committing the finished image")
 	}

 	return nil
@@ -293,14 +293,14 @@ func (ic *imageCopier) copyConfig(src types.Image) error {
 		fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
 		configBlob, err := src.ConfigBlob()
 		if err != nil {
-			return fmt.Errorf("Error reading config blob %s: %v", srcInfo.Digest, err)
+			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
 		}
 		destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
 		if err != nil {
 			return err
 		}
 		if destInfo.Digest != srcInfo.Digest {
-			return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
 		}
 	}
 	return nil
@@ -319,7 +319,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
 	// Check if we already have a blob with this digest
 	haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
 	if err != nil && err != types.ErrBlobNotFound {
-		return types.BlobInfo{}, "", fmt.Errorf("Error checking for blob %s at destination: %v", srcInfo.Digest, err)
+		return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
 	}
 	// If we already have a cached diffID for this blob, we don't need to compute it
 	diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
@@ -327,13 +327,13 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
 	if haveBlob && !diffIDIsNeeded {
 		// Check the blob sizes match, if we were given a size this time
 		if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
-			return types.BlobInfo{}, "", fmt.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
+			return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
 		}
 		srcInfo.Size = extantBlobSize
 		// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
 		blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
 		if err != nil {
-			return types.BlobInfo{}, "", fmt.Errorf("Error reapplying blob %s at destination: %v", srcInfo.Digest, err)
+			return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
 		}
 		fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
 		return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
@@ -343,7 +343,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
 	fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
 	srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
 	if err != nil {
-		return types.BlobInfo{}, "", fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
+		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
 	}
 	defer srcStream.Close()

@@ -356,7 +356,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest
 	if diffIDIsNeeded {
 		diffIDResult = <-diffIDChan
 		if diffIDResult.err != nil {
-			return types.BlobInfo{}, "", fmt.Errorf("Error computing layer DiffID: %v", diffIDResult.err)
+			return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
 		}
 		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
 		ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
@@ -441,7 +441,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
 	// read stream to the end, and validation does not happen.
 	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
 	if err != nil {
-		return types.BlobInfo{}, fmt.Errorf("Error preparing to verify blob %s: %v", srcInfo.Digest, err)
+		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
 	}
 	var destStream io.Reader = digestingReader

@@ -449,7 +449,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
 	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by detectCompression.
 	decompressor, destStream, err := detectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
 	if err != nil {
-		return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
+		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
 	}
 	isCompressed := decompressor != nil

@@ -492,7 +492,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
 	// === Finally, send the layer stream to dest.
 	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
 	if err != nil {
-		return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err)
+		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
 	}

 	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer
@@ -503,15 +503,15 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
 		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
 		_, err := io.Copy(ioutil.Discard, originalLayerReader)
 		if err != nil {
-			return types.BlobInfo{}, fmt.Errorf("Error reading input blob %s: %v", srcInfo.Digest, err)
+			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
 		}
 	}

 	if digestingReader.validationFailed { // Coverage: This should never happen.
-		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
 	}
 	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
-		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
 	}
 	return uploadedInfo, nil
 }
@@ -542,7 +542,7 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s

 	_, srcType, err := src.Manifest()
 	if err != nil { // This should have been cached?!
-		return fmt.Errorf("Error reading manifest: %v", err)
+		return errors.Wrap(err, "Error reading manifest")
 	}
 	if _, ok := supportedByDest[srcType]; ok {
 		logrus.Debugf("Manifest MIME type %s is declared supported by the destination", srcType)
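Two properties of github.com/pkg/errors are worth keeping in mind when reading the copyLayer and Image hunks above. First, a wrapped error no longer compares equal to the sentinel it wraps, so comparisons such as err != types.ErrBlobNotFound rely on the error arriving unwrapped; errors.Cause recovers the original value when it does not. Second, errors.Wrap returns nil when given a nil error, which matters for call sites like the IsRunningImageAllowed check, where the error being wrapped can be nil. A minimal sketch using an illustrative sentinel (not the real types.ErrBlobNotFound):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// errBlobNotFound stands in for a sentinel such as types.ErrBlobNotFound.
var errBlobNotFound = errors.New("blob not found")

// hasBlob is a hypothetical lookup that wraps the sentinel with context.
func hasBlob(digest string) error {
	return errors.Wrapf(errBlobNotFound, "checking for blob %s", digest)
}

func main() {
	err := hasBlob("sha256:deadbeef")
	fmt.Println(err == errBlobNotFound)               // false: wrapping creates a new value
	fmt.Println(errors.Cause(err) == errBlobNotFound) // true: Cause unwraps back to the sentinel

	fmt.Println(errors.Wrap(nil, "Source image rejected") == nil) // true: wrapping nil yields nil
}
```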
vendor/github.com/containers/image/directory/directory_dest.go (generated, vendored): 6 changed lines

@@ -1,13 +1,13 @@
 package directory

 import (
-	"fmt"
 	"io"
 	"io/ioutil"
 	"os"

 	"github.com/containers/image/types"
 	"github.com/docker/distribution/digest"
+	"github.com/pkg/errors"
 )

 type dirImageDestination struct {
@@ -78,7 +78,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	}
 	computedDigest := digester.Digest()
 	if inputInfo.Size != -1 && size != inputInfo.Size {
-		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
 		return types.BlobInfo{}, err
@@ -96,7 +96,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo

 func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
 	if info.Digest == "" {
-		return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`)
+		return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
 	}
 	blobPath := d.ref.layerPath(info.Digest)
 	finfo, err := os.Stat(blobPath)
vendor/github.com/containers/image/directory/directory_src.go (generated, vendored): 4 changed lines

@@ -1,7 +1,6 @@
 package directory

 import (
-	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -9,6 +8,7 @@ import (
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/digest"
+	"github.com/pkg/errors"
 )

 type dirImageSource struct {
@@ -42,7 +42,7 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) {
 }

 func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-	return nil, "", fmt.Errorf(`Getting target manifest not supported by "dir:"`)
+	return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
 }

 // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
vendor/github.com/containers/image/directory/directory_transport.go (generated, vendored): 9 changed lines

@@ -1,11 +1,12 @@
 package directory

 import (
-	"errors"
 	"fmt"
 	"path/filepath"
 	"strings"

+	"github.com/pkg/errors"
+
 	"github.com/containers/image/directory/explicitfilepath"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
@@ -33,7 +34,7 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
 // scope passed to this function will not be "", that value is always allowed.
 func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
 	if !strings.HasPrefix(scope, "/") {
-		return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
+		return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
 	}
 	// Refuse also "/", otherwise "/" and "" would have the same semantics,
 	// and "" could be unexpectedly shadowed by the "/" entry.
@@ -42,7 +43,7 @@ func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
 	}
 	cleaned := filepath.Clean(scope)
 	if cleaned != scope {
-		return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+		return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
 	}
 	return nil
 }
@@ -153,7 +154,7 @@ func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.Ima

 // DeleteImage deletes the named image from the registry, if supported.
 func (ref dirReference) DeleteImage(ctx *types.SystemContext) error {
-	return fmt.Errorf("Deleting images not implemented for dir: images")
+	return errors.Errorf("Deleting images not implemented for dir: images")
 }

 // manifestPath returns a path for the manifest within a directory using our conventions.
vendor/github.com/containers/image/directory/explicitfilepath/path.go (generated, vendored): 7 changed lines

@@ -1,9 +1,10 @@
 package explicitfilepath

 import (
-	"fmt"
 	"os"
 	"path/filepath"
+
+	"github.com/pkg/errors"
 )

 // ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
@@ -25,14 +26,14 @@ func ResolvePathToFullyExplicit(path string) (string, error) {
 			// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
 			// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
 			// in the resulting path, and especially not at the end.
-			return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path)
+			return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
 		}
 		resolvedPath := filepath.Join(resolvedParent, file)
 		// As a sanity check, ensure that there are no "." or ".." components.
 		cleanedResolvedPath := filepath.Clean(resolvedPath)
 		if cleanedResolvedPath != resolvedPath {
 			// Coverage: This should never happen.
-			return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
+			return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
 		}
 		return resolvedPath, nil
 	default: // err != nil, unrecognized
vendor/github.com/containers/image/docker/daemon/daemon_dest.go (generated, vendored): 22 changed lines

@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"bytes"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -17,6 +16,7 @@ import (
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/engine-api/client"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )

@@ -36,16 +36,16 @@ type daemonImageDestination struct {
 // newImageDestination returns a types.ImageDestination for the specified image reference.
 func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
 	if ref.ref == nil {
-		return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
 	}
 	namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
 	if !ok {
-		return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
 	}

 	c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
 	if err != nil {
-		return nil, fmt.Errorf("Error initializing docker engine client: %v", err)
+		return nil, errors.Wrap(err, "Error initializing docker engine client")
 	}

 	reader, writer := io.Pipe()
@@ -84,7 +84,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe

 	resp, err := c.ImageLoad(ctx, reader, true)
 	if err != nil {
-		err = fmt.Errorf("Error saving image to docker engine: %v", err)
+		err = errors.Wrap(err, "Error saving image to docker engine")
 		return
 	}
 	defer resp.Body.Close()
@@ -123,7 +123,7 @@ func (d *daemonImageDestination) SupportedManifestMIMETypes() []string {
 // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
 // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
 func (d *daemonImageDestination) SupportsSignatures() error {
-	return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported")
+	return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
 }

 // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@@ -181,7 +181,7 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI

 func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
 	if info.Digest == "" {
-		return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`)
+		return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
 	}
 	if blob, ok := d.blobs[info.Digest]; ok {
 		return true, blob.Size, nil
@@ -196,10 +196,10 @@ func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInf
 func (d *daemonImageDestination) PutManifest(m []byte) error {
 	var man schema2Manifest
 	if err := json.Unmarshal(m, &man); err != nil {
-		return fmt.Errorf("Error parsing manifest: %v", err)
+		return errors.Wrap(err, "Error parsing manifest")
 	}
 	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
-		return fmt.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
 	}

 	layerPaths := []string{}
@@ -280,14 +280,14 @@ func (d *daemonImageDestination) sendFile(path string, expectedSize int64, strea
 		return err
 	}
 	if size != expectedSize {
-		return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
 	}
 	return nil
 }

 func (d *daemonImageDestination) PutSignatures(signatures [][]byte) error {
 	if len(signatures) != 0 {
-		return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported")
+		return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
 	}
 	return nil
 }
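The pattern throughout these hunks: errors.Wrap or errors.Wrapf when an underlying error exists and should stay attached, errors.Errorf when the function itself detects the problem and there is nothing to wrap. A compact sketch with illustrative names (the manifest type below is a minimal stand-in, not the real schema2Manifest):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pkg/errors"
)

// manifest is a minimal stand-in for the schema2Manifest used in daemon_dest.go.
type manifest struct {
	SchemaVersion int `json:"schemaVersion"`
}

func putManifest(m []byte) error {
	var man manifest
	if err := json.Unmarshal(m, &man); err != nil {
		// An underlying error exists: wrap it so the JSON error stays attached.
		return errors.Wrap(err, "Error parsing manifest")
	}
	if man.SchemaVersion != 2 {
		// No underlying error: create a new one with Errorf.
		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
	}
	return nil
}

func main() {
	fmt.Println(putManifest([]byte("not json")))
	fmt.Println(putManifest([]byte(`{"schemaVersion": 1}`)))
}
```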
vendor/github.com/containers/image/docker/daemon/daemon_src.go (generated, vendored): 28 changed lines

@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"bytes"
 	"encoding/json"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -14,6 +13,7 @@ import (
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/engine-api/client"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )

@@ -49,13 +49,13 @@ type layerInfo struct {
 func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
 	c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
 	if err != nil {
-		return nil, fmt.Errorf("Error initializing docker engine client: %v", err)
+		return nil, errors.Wrap(err, "Error initializing docker engine client")
 	}
 	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
 	// Either way ImageSave should create a tarball with exactly one image.
 	inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()})
 	if err != nil {
-		return nil, fmt.Errorf("Error loading image from docker engine: %v", err)
+		return nil, errors.Wrap(err, "Error loading image from docker engine")
 	}
 	defer inputStream.Close()

@@ -145,7 +145,7 @@ func (s *daemonImageSource) openTarComponent(componentPath string) (io.ReadClose
 	}

 	if !header.FileInfo().Mode().IsRegular() {
-		return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
 	}
 	succeeded = true
 	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@@ -174,7 +174,7 @@ func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Heade
 func (s *daemonImageSource) readTarComponent(path string) ([]byte, error) {
 	file, err := s.openTarComponent(path)
 	if err != nil {
-		return nil, fmt.Errorf("Error loading tar component %s: %v", path, err)
+		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
 	}
 	defer file.Close()
 	bytes, err := ioutil.ReadAll(file)
@@ -203,7 +203,7 @@ func (s *daemonImageSource) ensureCachedDataIsPresent() error {
 	}
 	var parsedConfig dockerImage // Most fields ommitted, we only care about layer DiffIDs.
 	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-		return fmt.Errorf("Error decoding tar config %s: %v", tarManifest.Config, err)
+		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
 	}

 	knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@@ -229,10 +229,10 @@ func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
 	}
 	var items []manifestItem
 	if err := json.Unmarshal(bytes, &items); err != nil {
-		return nil, fmt.Errorf("Error decoding tar manifest.json: %v", err)
+		return nil, errors.Wrap(err, "Error decoding tar manifest.json")
 	}
 	if len(items) != 1 {
-		return nil, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
+		return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
 	}
 	return &items[0], nil
 }
@@ -240,7 +240,7 @@ func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
 func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) {
 	// Collect layer data available in manifest and config.
 	if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
-		return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+		return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
 	}
 	knownLayers := map[diffID]*layerInfo{}
 	unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
@@ -253,7 +253,7 @@ func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedCo
 		}
 		layerPath := tarManifest.Layers[i]
 		if _, ok := unknownLayerSizes[layerPath]; ok {
-			return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+			return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
 		}
 		li := &layerInfo{ // A new element in each iteration
 			path: layerPath,
@@ -284,7 +284,7 @@ func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedCo
 		}
 	}
 	if len(unknownLayerSizes) != 0 {
-		return nil, fmt.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
+		return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
 	}

 	return knownLayers, nil
@@ -310,7 +310,7 @@ func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
 	for _, diffID := range s.orderedDiffIDList {
 		li, ok := s.knownLayers[diffID]
 		if !ok {
-			return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+			return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
 		}
 		m.Layers = append(m.Layers, distributionDescriptor{
 			Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
@@ -331,7 +331,7 @@ func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
 // out of a manifest list.
 func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
 	// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
-	return nil, "", fmt.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+	return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
 }

 // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
@@ -352,7 +352,7 @@ func (s *daemonImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
 		return stream, li.size, nil
 	}

-	return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest)
+	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
 }

 // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
vendor/github.com/containers/image/docker/daemon/daemon_transport.go (generated, vendored): 13 changed lines

@@ -1,8 +1,7 @@
 package daemon

 import (
-	"errors"
-	"fmt"
+	"github.com/pkg/errors"

 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
@@ -56,7 +55,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
 		// The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
 		// Other digest references are ambiguous, so refuse them.
 		if dgst.Algorithm() != digest.Canonical {
-			return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+			return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
 		}
 		return NewReference(dgst, nil)
 	}
@@ -66,7 +65,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
 		return nil, err
 	}
 	if ref.Name() == digest.Canonical.String() {
-		return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+		return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
 	}
 	return NewReference("", ref)
 }
@@ -78,14 +77,14 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
 	}
 	if ref != nil {
 		if reference.IsNameOnly(ref) {
-			return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", ref.String())
+			return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", ref.String())
 		}
 		// A github.com/distribution/reference value can have a tag and a digest at the same time!
 		// docker/reference does not handle that, so fail.
 		_, isTagged := ref.(reference.NamedTagged)
 		_, isDigested := ref.(reference.Canonical)
 		if isTagged && isDigested {
-			return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+			return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
 		}
 	}
 	return daemonReference{
@@ -175,5 +174,5 @@ func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error {
 	// Should this just untag the image? Should this stop running containers?
 	// The semantics is not quite as clear as for remote repositories.
 	// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
-	return fmt.Errorf("Deleting images not implemented for docker-daemon: images")
+	return errors.Errorf("Deleting images not implemented for docker-daemon: images")
 }
vendor/github.com/containers/image/docker/docker_client.go (generated, vendored): 28 changed lines

@@ -4,7 +4,6 @@ import (
 	"crypto/tls"
 	"encoding/base64"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -20,6 +19,7 @@ import (
 	"github.com/containers/storage/pkg/homedir"
 	"github.com/docker/go-connections/sockets"
 	"github.com/docker/go-connections/tlsconfig"
+	"github.com/pkg/errors"
 )

 const (
@@ -101,7 +101,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
 		if strings.HasSuffix(f.Name(), ".crt") {
 			systemPool, err := tlsconfig.SystemCertPool()
 			if err != nil {
-				return fmt.Errorf("unable to get system cert pool: %v", err)
+				return errors.Wrap(err, "unable to get system cert pool")
 			}
 			tlsc.RootCAs = systemPool
 			logrus.Debugf("crt: %s", fullPath)
@@ -116,7 +116,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
 			keyName := certName[:len(certName)-5] + ".key"
 			logrus.Debugf("cert: %s", fullPath)
 			if !hasFile(fs, keyName) {
-				return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+				return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
 			}
 			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
 			if err != nil {
@@ -129,7 +129,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
 			certName := keyName[:len(keyName)-4] + ".cert"
 			logrus.Debugf("key: %s", fullPath)
 			if !hasFile(fs, certName) {
-				return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
+				return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
 			}
 		}
 	}
@@ -240,7 +240,7 @@ func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[
 func (c *dockerClient) setupRequestAuth(req *http.Request) error {
 	tokens := strings.SplitN(strings.TrimSpace(c.wwwAuthenticate), " ", 2)
 	if len(tokens) != 2 {
-		return fmt.Errorf("expected 2 tokens in WWW-Authenticate: %d, %s", len(tokens), c.wwwAuthenticate)
+		return errors.Errorf("expected 2 tokens in WWW-Authenticate: %d, %s", len(tokens), c.wwwAuthenticate)
 	}
 	switch tokens[0] {
 	case "Basic":
@@ -271,11 +271,11 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
 		// Arbitrarily use the first challenge, there is no reason to expect more than one.
 		challenge := chs[0]
 		if challenge.Scheme != "bearer" { // Another artifact of trying to handle WWW-Authenticate before it actually happens.
-			return fmt.Errorf("Unimplemented: WWW-Authenticate Bearer replaced by %#v", challenge.Scheme)
+			return errors.Errorf("Unimplemented: WWW-Authenticate Bearer replaced by %#v", challenge.Scheme)
 		}
 		realm, ok := challenge.Parameters["realm"]
 		if !ok {
-			return fmt.Errorf("missing realm in bearer auth challenge")
+			return errors.Errorf("missing realm in bearer auth challenge")
 		}
 		service, _ := challenge.Parameters["service"] // Will be "" if not present
 		scope, _ := challenge.Parameters["scope"]     // Will be "" if not present
@@ -286,7 +286,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
 		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
 		return nil
 	}
-	return fmt.Errorf("no handler for %s authentication", tokens[0])
+	return errors.Errorf("no handler for %s authentication", tokens[0])
 	// support docker bearer with authconfig's Auth string? see docker2aci
 }

@@ -317,11 +317,11 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err
 	defer res.Body.Close()
 	switch res.StatusCode {
 	case http.StatusUnauthorized:
-		return "", fmt.Errorf("unable to retrieve auth token: 401 unauthorized")
+		return "", errors.Errorf("unable to retrieve auth token: 401 unauthorized")
 	case http.StatusOK:
 		break
 	default:
-		return "", fmt.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
+		return "", errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
 	}
 	tokenBlob, err := ioutil.ReadAll(res.Body)
 	if err != nil {
@@ -365,7 +365,7 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
 		if os.IsNotExist(err) {
 			return "", "", nil
 		}
-		return "", "", fmt.Errorf("%s - %v", oldDockerCfgPath, err)
+		return "", "", errors.Wrap(err, oldDockerCfgPath)
 	}

 	j, err := ioutil.ReadFile(oldDockerCfgPath)
@@ -377,7 +377,7 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
 		}

 	} else if err != nil {
-		return "", "", fmt.Errorf("%s - %v", dockerCfgPath, err)
+		return "", "", errors.Wrap(err, dockerCfgPath)
 	}

 	// I'm feeling lucky
@@ -414,7 +414,7 @@ func (c *dockerClient) ping() (*pingResponse, error) {
 		defer resp.Body.Close()
 		logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v2/", resp.StatusCode)
 		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-			return nil, fmt.Errorf("error pinging repository, response code %d", resp.StatusCode)
+			return nil, errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
 		}
 		pr := &pingResponse{}
 		pr.WWWAuthenticate = resp.Header.Get("WWW-Authenticate")
@@ -427,7 +427,7 @@ func (c *dockerClient) ping() (*pingResponse, error) {
 		pr, err = ping("http")
 	}
 	if err != nil {
-		err = fmt.Errorf("pinging docker registry returned %+v", err)
+		err = errors.Wrap(err, "pinging docker registry returned")
 		if c.ctx != nil && c.ctx.DockerDisableV1Ping {
 			return nil, err
 		}
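errors.Wrap composes messages as "message: cause", which is why wrapping with just a file path, as the getAuth hunk above does, still reads naturally when printed. A small sketch with an illustrative path (the real code reads the Docker config files):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

func main() {
	// Illustrative path; getAuth uses ~/.dockercfg and ~/.docker/config.json.
	path := "/nonexistent/.dockercfg"
	if _, err := ioutil.ReadFile(path); err != nil {
		wrapped := errors.Wrap(err, path)
		// Prints something like:
		// /nonexistent/.dockercfg: open /nonexistent/.dockercfg: no such file or directory
		fmt.Println(wrapped)
	}
}
```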
vendor/github.com/containers/image/docker/docker_image.go (generated, vendored): 3 changed lines

@@ -7,6 +7,7 @@ import (

 	"github.com/containers/image/image"
 	"github.com/containers/image/types"
+	"github.com/pkg/errors"
 )

 // Image is a Docker-specific implementation of types.Image with a few extra methods
@@ -46,7 +47,7 @@ func (i *Image) GetRepositoryTags() ([]string, error) {
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusOK {
 		// print url also
-		return nil, fmt.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
+		return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
 	}
 	type tagsRes struct {
 		Tags []string
vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored): 37 changed lines

@@ -14,6 +14,7 @@ import (
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/digest"
+	"github.com/pkg/errors"
 )

 type dockerImageDestination struct {
@@ -57,7 +58,7 @@ func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
 // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
 // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
 func (d *dockerImageDestination) SupportsSignatures() error {
-	return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
+	return errors.Errorf("Pushing signatures to a Docker Registry is not supported")
 }

 // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@@ -101,11 +102,11 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 			return types.BlobInfo{Digest: inputInfo.Digest, Size: getBlobSize(res)}, nil
 		case http.StatusUnauthorized:
 			logrus.Debugf("... not authorized")
-			return types.BlobInfo{}, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
+			return types.BlobInfo{}, errors.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
 		case http.StatusNotFound:
 			// noop
 		default:
-			return types.BlobInfo{}, fmt.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode))
+			return types.BlobInfo{}, errors.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode))
 		}
 		logrus.Debugf("... failed, status %d", res.StatusCode)
 	}
@@ -120,11 +121,11 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusAccepted {
 		logrus.Debugf("Error initiating layer upload, response %#v", *res)
-		return types.BlobInfo{}, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
+		return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
 	}
 	uploadLocation, err := res.Location()
 	if err != nil {
-		return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
+		return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
 	}

 	digester := digest.Canonical.New()
@@ -140,7 +141,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI

 	uploadLocation, err = res.Location()
 	if err != nil {
-		return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
+		return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
 	}

 	// FIXME: DELETE uploadLocation on failure
@@ -156,7 +157,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusCreated {
 		logrus.Debugf("Error uploading layer, response %#v", *res)
-		return types.BlobInfo{}, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
+		return types.BlobInfo{}, errors.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
 	}

 	logrus.Debugf("Upload of layer %s complete", computedDigest)
@@ -165,7 +166,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI

 func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
 	if info.Digest == "" {
-		return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`)
+		return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
 	}
 	checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), info.Digest.String())

@@ -181,7 +182,7 @@ func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
 		return true, getBlobSize(res), nil
 	case http.StatusUnauthorized:
 		logrus.Debugf("... not authorized")
-		return false, -1, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
+		return false, -1, errors.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
 	case http.StatusNotFound:
 		logrus.Debugf("... not present")
 		return false, -1, types.ErrBlobNotFound
@@ -225,7 +226,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
 			logrus.Debugf("Error body %s", string(body))
 		}
 		logrus.Debugf("Error uploading manifest, status %d, %#v", res.StatusCode, res)
-		return fmt.Errorf("Error uploading manifest to %s, status %d", url, res.StatusCode)
+		return errors.Errorf("Error uploading manifest to %s, status %d", url, res.StatusCode)
 	}
 	return nil
 }
@@ -239,18 +240,18 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
 		return nil
 	}
 	if d.c.signatureBase == nil {
-		return fmt.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
+		return errors.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
 	}

 	if d.manifestDigest.String() == "" {
 		// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
-		return fmt.Errorf("Unknown manifest digest, can't add signatures")
+		return errors.Errorf("Unknown manifest digest, can't add signatures")
 	}

 	for i, signature := range signatures {
 		url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
 		if url == nil {
-			return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
 		}
 		err := d.putOneSignature(url, signature)
 		if err != nil {
@@ -265,7 +266,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
 	for i := len(signatures); ; i++ {
 		url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
 		if url == nil {
-			return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
 		}
 		missing, err := d.c.deleteOneSignature(url)
 		if err != nil {
@@ -295,9 +296,9 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
 		return nil

 	case "http", "https":
-		return fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
 	default:
-		return fmt.Errorf("Unsupported scheme when writing signature to %s", url.String())
+		return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
 	}
 }

@@ -314,9 +315,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
 		return false, err

 	case "http", "https":
-		return false, fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+		return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
 	default:
-		return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
 	}
 }
vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored): 19 changed lines

@@ -15,6 +15,7 @@ import (
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/registry/client"
+	"github.com/pkg/errors"
 )

 type dockerImageSource struct {
@@ -139,7 +140,7 @@ func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64
 		resp, err = s.c.makeRequestToResolvedURL("GET", url, nil, nil, -1, false)
 		if err == nil {
 			if resp.StatusCode != http.StatusOK {
-				err = fmt.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
+				err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
 				logrus.Debug(err)
 				continue
 			}
@@ -173,7 +174,7 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
 	}
 	if res.StatusCode != http.StatusOK {
 		// print url also
-		return nil, 0, fmt.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
+		return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
 	}
 	return res.Body, getBlobSize(res), nil
 }
@@ -195,7 +196,7 @@ func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
 	for i := 0; ; i++ {
 		url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
 		if url == nil {
-			return nil, fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+			return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
 		}
 		signature, missing, err := s.getOneSignature(url)
 		if err != nil {
@@ -234,7 +235,7 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
 		if res.StatusCode == http.StatusNotFound {
 			return nil, true, nil
 		} else if res.StatusCode != http.StatusOK {
-			return nil, false, fmt.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
+			return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
 		}
 		sig, err := ioutil.ReadAll(res.Body)
 		if err != nil {
@@ -243,7 +244,7 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
 		return sig, false, nil

 	default:
-		return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.String())
+		return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
 	}
 }

@@ -276,9 +277,9 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
 	switch get.StatusCode {
 	case http.StatusOK:
 	case http.StatusNotFound:
-		return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+		return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
 	default:
-		return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+		return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
 	}

 	digest := get.Header.Get("Docker-Content-Digest")
@@ -297,7 +298,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
 		return err
 	}
 	if delete.StatusCode != http.StatusAccepted {
-		return fmt.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status)
+		return errors.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status)
 	}

 	if c.signatureBase != nil {
@@ -309,7 +310,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
 		for i := 0; ; i++ {
 			url := signatureStorageURL(c.signatureBase, manifestDigest, i)
 			if url == nil {
-				return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+				return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
 			}
 			missing, err := c.deleteOneSignature(url)
 			if err != nil {
9
vendor/github.com/containers/image/docker/docker_transport.go
generated
vendored
@ -7,6 +7,7 @@ import (
"github.com/containers/image/docker/policyconfiguration"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/pkg/errors"
)

// Transport is an ImageTransport for Docker registry-hosted images.
@ -42,7 +43,7 @@ type dockerReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
}
ref, err := reference.ParseNamed(strings.TrimPrefix(refString, "//"))
if err != nil {
@ -55,7 +56,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
if reference.IsNameOnly(ref) {
return nil, fmt.Errorf("Docker reference %s has neither a tag nor a digest", ref.String())
return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", ref.String())
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// docker/reference does not handle that, so fail.
@ -64,7 +65,7 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
return nil, fmt.Errorf("Docker references with both a tag and digest are currently not supported")
return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
}
return dockerReference{
ref: ref,
@ -151,5 +152,5 @@ func (ref dockerReference) tagOrDigest() (string, error) {
return ref.Tag(), nil
}
// This should not happen, NewReference above refuses reference.IsNameOnly values.
return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", ref.ref.String())
return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", ref.ref.String())
}

11
vendor/github.com/containers/image/docker/lookaside.go
generated
vendored
@ -11,6 +11,7 @@ import (

"github.com/docker/distribution/digest"
"github.com/ghodss/yaml"
"github.com/pkg/errors"

"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
@ -60,12 +61,12 @@ func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReferenc

url, err := url.Parse(topLevel)
if err != nil {
return nil, fmt.Errorf("Invalid signature storage URL %s: %v", topLevel, err)
return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
}
// FIXME? Restrict to explicitly supported schemes?
repo := ref.ref.FullName() // Note that this is without a tag or digest.
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
}
url.Path = url.Path + "/" + repo
return url, nil
@ -114,12 +115,12 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
var config registryConfiguration
err = yaml.Unmarshal(configBytes, &config)
if err != nil {
return nil, fmt.Errorf("Error parsing %s: %v", configPath, err)
return nil, errors.Wrapf(err, "Error parsing %s", configPath)
}

if config.DefaultDocker != nil {
if mergedConfig.DefaultDocker != nil {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
dockerDefaultMergedFrom, configPath)
}
mergedConfig.DefaultDocker = config.DefaultDocker
@ -128,7 +129,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {

for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
if _, ok := mergedConfig.Docker[nsName]; ok {
return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
nsName, nsMergedFrom[nsName], configPath)
}
mergedConfig.Docker[nsName] = nsConfig

8
vendor/github.com/containers/image/docker/policyconfiguration/naming.go
generated
vendored
@ -1,10 +1,10 @@
package policyconfiguration

import (
"errors"
"fmt"
"strings"

"github.com/pkg/errors"

"github.com/containers/image/docker/reference"
)

@ -17,9 +17,9 @@ func DockerReferenceIdentity(ref reference.Named) (string, error) {
digested, isDigested := ref.(reference.Canonical)
switch {
case isTagged && isDigested: // This should not happen, docker/reference.ParseNamed drops the tag.
return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", ref.String())
return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", ref.String())
case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", ref.String())
return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", ref.String())
case isTagged:
res = res + ":" + tagged.Tag()
case isDigested:

10
vendor/github.com/containers/image/docker/reference/reference.go
generated
vendored
@ -1,11 +1,11 @@
package reference

import (
"errors"
"fmt"
"regexp"
"strings"

"github.com/pkg/errors"

"github.com/docker/distribution/digest"
distreference "github.com/docker/distribution/reference"
)
@ -55,7 +55,7 @@ type Canonical interface {
func ParseNamed(s string) (Named, error) {
named, err := distreference.ParseNamed(s)
if err != nil {
return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s)
return nil, errors.Errorf("Error parsing reference: %q is not a valid repository/tag", s)
}
r, err := WithName(named.Name())
if err != nil {
@ -207,14 +207,14 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)

func validateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id)
return errors.Errorf("image ID %q is invalid", id)
}
return nil
}

func validateName(name string) error {
if err := validateID(name); err == nil {
return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
return errors.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
}
return nil
}

7
vendor/github.com/containers/image/image/docker_list.go
generated
vendored
@ -2,13 +2,12 @@ package image

import (
"encoding/json"
"errors"
"fmt"
"runtime"

"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
"github.com/pkg/errors"
)

type platformSpec struct {
@ -54,10 +53,10 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen

matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
if err != nil {
return nil, fmt.Errorf("Error computing manifest digest: %v", err)
return nil, errors.Wrap(err, "Error computing manifest digest")
}
if !matches {
return nil, fmt.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
}

return manifestInstanceFromBlob(src, manblob, mt)

25
vendor/github.com/containers/image/image/docker_schema1.go
generated
vendored
@ -2,8 +2,6 @@ package image

import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strings"
"time"
@ -12,6 +10,7 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
"github.com/pkg/errors"
)

var (
@ -54,7 +53,7 @@ func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
return nil, err
}
if mschema1.SchemaVersion != 1 {
return nil, fmt.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
}
if len(mschema1.FSLayers) != len(mschema1.History) {
return nil, errors.New("length of history not equal to number of layers")
@ -153,7 +152,7 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
if options.LayerInfos != nil {
// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
if len(copy.FSLayers) != len(options.LayerInfos) {
return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
}
for i, info := range options.LayerInfos {
// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
@ -171,7 +170,7 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
case manifest.DockerV2Schema2MediaType:
return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
default:
return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
}

return memoryImageFromManifest(&copy), nil
@ -211,7 +210,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
for _, img := range imgs {
// skip IDs that appear after each other, we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap[lastID] = struct{}{}
@ -222,7 +221,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
} else if imgs[i].Parent != imgs[i+1].ID {
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
}
}
return nil
@ -230,7 +229,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {

func validateV1ID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id)
return errors.Errorf("image ID %q is invalid", id)
}
return nil
}
@ -239,16 +238,16 @@ func validateV1ID(id string) error {
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
if len(m.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
}
if len(m.History) != len(m.FSLayers) {
return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
}
if len(uploadedLayerInfos) != len(m.FSLayers) {
return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
}
if len(layerDiffIDs) != len(m.FSLayers) {
return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
}

rootFS := rootFS{
@ -263,7 +262,7 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl

var v1compat v1Compatibility
if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
return nil, fmt.Errorf("Error decoding history entry %d: %v", v1Index, err)
return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
}
history[v2Index] = imageHistory{
Created: v1compat.Created,

20
vendor/github.com/containers/image/image/docker_schema2.go
generated
vendored
@ -5,7 +5,6 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"strings"

@ -13,6 +12,7 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
"github.com/pkg/errors"
)

// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
@ -82,7 +82,7 @@ func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
Digest: m.ConfigDescriptor.Digest,
@ -99,7 +99,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.ConfigDescriptor.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
@ -152,7 +152,7 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
if options.LayerInfos != nil {
if len(copy.LayersDescriptors) != len(options.LayerInfos) {
return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
}
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
for i, info := range options.LayerInfos {
@ -167,7 +167,7 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
return copy.convertToManifestSchema1(options.InformationOnly.Destination)
default:
return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
}

return memoryImageFromManifest(&copy), nil
@ -193,7 +193,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
haveGzippedEmptyLayer := false
if len(imageConfig.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
}
for v2Index, historyEntry := range imageConfig.History {
parentV1ID = v1ID
@ -205,17 +205,17 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
logrus.Debugf("Uploading empty layer during conversion to schema 1")
info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))})
if err != nil {
return nil, fmt.Errorf("Error uploading empty layer: %v", err)
return nil, errors.Wrap(err, "Error uploading empty layer")
}
if info.Digest != gzippedEmptyLayerDigest {
return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
}
haveGzippedEmptyLayer = true
}
blobDigest = gzippedEmptyLayerDigest
} else {
if nonemptyLayerIndex >= len(m.LayersDescriptors) {
return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
}
blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
@ -239,7 +239,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
v1CompatibilityBytes, err := json.Marshal(&fakeImage)
if err != nil {
return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}

fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}

2
vendor/github.com/containers/image/image/memory.go
generated
vendored
@ -1,7 +1,7 @@
package image

import (
"errors"
"github.com/pkg/errors"

"github.com/containers/image/types"
)

10
vendor/github.com/containers/image/image/oci.go
generated
vendored
@ -2,13 +2,13 @@ package image

import (
"encoding/json"
"fmt"
"io/ioutil"

"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

type manifestOCI1 struct {
@ -59,7 +59,7 @@ func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
Digest: m.ConfigDescriptor.Digest,
@ -76,7 +76,7 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.ConfigDescriptor.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
@ -125,7 +125,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
if options.LayerInfos != nil {
if len(copy.LayersDescriptors) != len(options.LayerInfos) {
return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
}
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
for i, info := range options.LayerInfos {
@ -139,7 +139,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
case manifest.DockerV2Schema2MediaType:
return copy.convertToManifestSchema2()
default:
return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
}

return memoryImageFromManifest(&copy), nil

7
vendor/github.com/containers/image/image/unparsed.go
generated
vendored
@ -1,11 +1,10 @@
package image

import (
"fmt"

"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/pkg/errors"
)

// UnparsedImage implements types.UnparsedImage .
@ -56,10 +55,10 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) {
digest := canonical.Digest()
matches, err := manifest.MatchesDigest(m, digest)
if err != nil {
return nil, "", fmt.Errorf("Error computing manifest digest: %v", err)
return nil, "", errors.Wrap(err, "Error computing manifest digest")
}
if !matches {
return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
}
}
}

14
vendor/github.com/containers/image/oci/layout/oci_dest.go
generated
vendored
@ -2,13 +2,13 @@ package layout

import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"

"github.com/pkg/errors"

"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
@ -44,7 +44,7 @@ func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *ociImageDestination) SupportsSignatures() error {
return fmt.Errorf("Pushing signatures for OCI images is not supported")
return errors.Errorf("Pushing signatures for OCI images is not supported")
}

// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@ -89,7 +89,7 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
}
computedDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
@ -114,7 +114,7 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo

func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`)
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
blobPath, err := d.ref.blobPath(info.Digest)
if err != nil {
@ -169,7 +169,7 @@ func createManifest(m []byte) ([]byte, string, error) {
case imgspecv1.MediaTypeImageManifest:
return m, mt, nil
}
return nil, "", fmt.Errorf("unrecognized manifest media type %q", mt)
return nil, "", errors.Errorf("unrecognized manifest media type %q", mt)
}

func (d *ociImageDestination) PutManifest(m []byte) error {
@ -227,7 +227,7 @@ func ensureParentDirectoryExists(path string) error {

func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
if len(signatures) != 0 {
return fmt.Errorf("Pushing signatures for OCI images is not supported")
return errors.Errorf("Pushing signatures for OCI images is not supported")
}
return nil
}

18
vendor/github.com/containers/image/oci/layout/oci_transport.go
generated
vendored
@ -1,7 +1,6 @@
package layout

import (
"errors"
"fmt"
"path/filepath"
"regexp"
@ -12,6 +11,7 @@ import (
"github.com/containers/image/image"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
"github.com/pkg/errors"
)

// Transport is an ImageTransport for OCI directories.
@ -43,16 +43,16 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
dir = scope[:sep]
tag := scope[sep+1:]
if !refRegexp.MatchString(tag) {
return fmt.Errorf("Invalid tag %s", tag)
return errors.Errorf("Invalid tag %s", tag)
}
}

if strings.Contains(dir, ":") {
return fmt.Errorf("Invalid OCI reference %s: path contains a colon", scope)
return errors.Errorf("Invalid OCI reference %s: path contains a colon", scope)
}

if !strings.HasPrefix(dir, "/") {
return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
@ -62,7 +62,7 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
}
cleaned := filepath.Clean(dir)
if cleaned != dir {
return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
return nil
}
@ -106,10 +106,10 @@ func NewReference(dir, tag string) (types.ImageReference, error) {
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
return nil, fmt.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved)
return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved)
}
if !refRegexp.MatchString(tag) {
return nil, fmt.Errorf("Invalid tag %s", tag)
return nil, errors.Errorf("Invalid tag %s", tag)
}
return ociReference{dir: dir, resolvedDir: resolved, tag: tag}, nil
}
@ -191,7 +191,7 @@ func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.Ima

// DeleteImage deletes the named image from the registry, if supported.
func (ref ociReference) DeleteImage(ctx *types.SystemContext) error {
return fmt.Errorf("Deleting images not implemented for oci: images")
return errors.Errorf("Deleting images not implemented for oci: images")
}

// ociLayoutPathPath returns a path for the oci-layout within a directory using OCI conventions.
@ -202,7 +202,7 @@ func (ref ociReference) ociLayoutPath() string {
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
func (ref ociReference) blobPath(digest digest.Digest) (string, error) {
if err := digest.Validate(); err != nil {
return "", fmt.Errorf("unexpected digest reference %s: %v", digest, err)
return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
}
return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil
}

36
vendor/github.com/containers/image/openshift/openshift-copies.go
generated
vendored
@ -4,7 +4,6 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
@ -18,6 +17,7 @@ import (

"github.com/ghodss/yaml"
"github.com/imdario/mergo"
"github.com/pkg/errors"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/homedir"
utilnet "k8s.io/kubernetes/pkg/util/net"
@ -348,20 +348,20 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err

if len(clusterInfo.Server) == 0 {
if len(clusterName) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined"))
validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
} else {
validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
}
}
// Make sure CA data and CA file aren't both specified
if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
}
if len(clusterInfo.CertificateAuthority) != 0 {
clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
defer clientCertCA.Close()
if err != nil {
validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
}
}

@ -385,36 +385,36 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
// Make sure cert data and file aren't both specified
if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
}
// Make sure key data and file aren't both specified
if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
}
// Make sure a key is specified
if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
}

if len(authInfo.ClientCertificate) != 0 {
clientCertFile, err := os.Open(authInfo.ClientCertificate)
defer clientCertFile.Close()
if err != nil {
validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
}
}
if len(authInfo.ClientKey) != 0 {
clientKeyFile, err := os.Open(authInfo.ClientKey)
defer clientKeyFile.Close()
if err != nil {
validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
}
}
}

// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
if (len(methods) > 1) && (!usingAuthPath) {
validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
}

return validationErrors
@ -518,7 +518,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
continue
}
if err != nil {
errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err))
errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
continue
}

@ -623,7 +623,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
}

if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
@ -636,7 +636,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
}

if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
@ -706,7 +706,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
// Kubernetes API.
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
if host == "" {
return nil, fmt.Errorf("host must be a URL or a host:port pair")
return nil, errors.Errorf("host must be a URL or a host:port pair")
}
base := host
hostURL, err := url.Parse(base)
@ -723,7 +723,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
return nil, err
}
if hostURL.Path != "" && hostURL.Path != "/" {
return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base)
return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
}
}

@ -793,7 +793,7 @@ func transportNew(config *restConfig) (http.RoundTripper, error) {

// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
if len(config.Username) != 0 && len(config.BearerToken) != 0 {
return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
return nil, errors.Errorf("username/password or bearer token may be set, but not both")
}

return rt, nil
@ -832,7 +832,7 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) {
return nil, nil
}
if c.HasCA() && c.Insecure {
return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed")
return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
}
if err := loadTLSFiles(c); err != nil {
return nil, err

12
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
@ -4,7 +4,6 @@ import (
"bytes"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -18,6 +17,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/image/version"
"github.com/docker/distribution/digest"
"github.com/pkg/errors"
)

// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
@ -124,7 +124,7 @@ func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]
if statusValid {
return nil, errors.New(status.Message)
}
return nil, fmt.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
}

return body, nil
@ -151,7 +151,7 @@ func (c *openshiftClient) getImage(imageStreamImageName string) (*image, error)
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
parts := strings.SplitN(ref, "/", 2)
if len(parts) != 2 {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return c.ref.dockerReference.Hostname() + "/" + parts[1], nil
}
@ -267,7 +267,7 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
}
}
if te == nil {
return fmt.Errorf("No matching tag found")
return errors.Errorf("No matching tag found")
}
logrus.Debugf("tag event %#v", te)
dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
@ -386,7 +386,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {

func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
if d.imageStreamImageName == "" {
return fmt.Errorf("Internal error: Unknown manifest digest, can't add signatures")
return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
}
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures.
@ -418,7 +418,7 @@ sigExists:
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
return fmt.Errorf("Error generating random signature ID: %v, len %d", err, n)
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
if _, ok := existingSigNames[signatureName]; !ok {

11
vendor/github.com/containers/image/openshift/openshift_transport.go
generated
vendored
@ -9,6 +9,7 @@ import (
"github.com/containers/image/docker/reference"
genericImage "github.com/containers/image/image"
"github.com/containers/image/types"
"github.com/pkg/errors"
)

// Transport is an ImageTransport for OpenShift registry-hosted images.
@ -36,7 +37,7 @@ var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
// scope passed to this function will not be "", that value is always allowed.
func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
if scopeRegexp.FindStringIndex(scope) == nil {
return fmt.Errorf("Invalid scope name %s", scope)
return errors.Errorf("Invalid scope name %s", scope)
}
return nil
}
@ -52,11 +53,11 @@ type openshiftReference struct {
func ParseReference(ref string) (types.ImageReference, error) {
r, err := reference.ParseNamed(ref)
if err != nil {
return nil, fmt.Errorf("failed to parse image reference %q, %v", ref, err)
return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
}
tagged, ok := r.(reference.NamedTagged)
if !ok {
return nil, fmt.Errorf("invalid image reference %s, %#v", ref, r)
return nil, errors.Errorf("invalid image reference %s, %#v", ref, r)
}
return NewReference(tagged)
}
@ -65,7 +66,7 @@ func ParseReference(ref string) (types.ImageReference, error) {
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
r := strings.SplitN(dockerRef.RemoteName(), "/", 3)
if len(r) != 2 {
return nil, fmt.Errorf("invalid image reference %s", dockerRef.String())
return nil, errors.Errorf("invalid image reference %s", dockerRef.String())
}
return openshiftReference{
namespace: r[0],
@ -146,5 +147,5 @@ func (ref openshiftReference) NewImageDestination(ctx *types.SystemContext) (typ

// DeleteImage deletes the named image from the registry, if supported.
func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error {
return fmt.Errorf("Deleting images not implemented for atomic: images")
return errors.Errorf("Deleting images not implemented for atomic: images")
}

3
vendor/github.com/containers/image/signature/mechanism.go
generated
vendored
@ -7,6 +7,7 @@ import (
"fmt"

"github.com/mtrmac/gpgme"
"github.com/pkg/errors"
)

// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
@ -77,7 +78,7 @@ func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, err
key, err := m.ctx.GetKey(keyIdentity, true)
if err != nil {
if e, ok := err.(gpgme.Error); ok && e.Code() == gpgme.ErrorEOF {
return nil, fmt.Errorf("key %q not found", keyIdentity)
return nil, errors.Errorf("key %q not found", keyIdentity)
}
return nil, err
}

5
vendor/github.com/containers/image/signature/policy_config.go
generated
vendored
@ -15,11 +15,12 @@ package signature

import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"path/filepath"

"github.com/pkg/errors"

"github.com/containers/image/docker/reference"
"github.com/containers/image/transports"
"github.com/containers/image/types"
@ -405,7 +406,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
case !gotKeyPath && !gotKeyData:
return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified")
default: // Coverage: This should never happen
return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
return errors.Errorf("Impossible keyPath/keyData presence combination!?")
}
if err != nil {
return err

5
vendor/github.com/containers/image/signature/policy_eval.go
generated
vendored
@ -6,10 +6,9 @@
package signature

import (
"fmt"

"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
"github.com/pkg/errors"
)

// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
@ -95,7 +94,7 @@ const (
// changeContextState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
return fmt.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
}
pc.state = new
return nil

9
vendor/github.com/containers/image/signature/policy_eval_signedby.go
generated
vendored
@ -3,12 +3,13 @@
package signature

import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"

"github.com/pkg/errors"

"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
@ -19,10 +20,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig [
case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already?
return sarRejected, nil, fmt.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
return sarRejected, nil, fmt.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
}

if pr.KeyPath != "" && pr.KeyData != nil {
@ -116,7 +117,7 @@ func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, er
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
}
rejections = append(rejections, reason)
}

3
vendor/github.com/containers/image/signature/signature.go
generated
vendored
@ -4,10 +4,11 @@ package signature

import (
"encoding/json"
"errors"
"fmt"
"time"

"github.com/pkg/errors"

"github.com/containers/image/version"
"github.com/docker/distribution/digest"
)

22 vendor/github.com/containers/image/storage/storage_image.go generated vendored
@ -3,12 +3,12 @@ package storage
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"time"

"github.com/pkg/errors"

"github.com/Sirupsen/logrus"
"github.com/containers/image/image"
"github.com/containers/image/manifest"
@ -78,7 +78,7 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
}
img, err := imageRef.transport.store.GetImage(id)
if err != nil {
return nil, fmt.Errorf("error reading image %q: %v", id, err)
return nil, errors.Wrapf(err, "error reading image %q", id)
}
image := &storageImageSource{
imageRef: imageRef,
@ -90,7 +90,7 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
SignatureSizes: []int{},
}
if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
return nil, fmt.Errorf("error decoding metadata for source image: %v", err)
return nil, errors.Wrap(err, "error decoding metadata for source image")
}
return image, nil
}
@ -278,7 +278,7 @@ func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobI

func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
if blobinfo.Digest == "" {
return false, -1, fmt.Errorf(`"Can not check for a blob with unknown digest`)
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
for _, blob := range s.BlobList {
if blob.Digest == blobinfo.Digest {
@ -468,7 +468,7 @@ func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64,
}
if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
return nil, -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
}
}
if layerMeta.CompressedSize <= 0 {
@ -504,7 +504,7 @@ func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) {
offset += length
}
if offset != len(signature) {
return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
}
return sigslice, nil
}
@ -513,12 +513,12 @@ func (s *storageImageSource) getSize() (int64, error) {
var sum int64
names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
if err != nil {
return -1, fmt.Errorf("error reading image %q: %v", s.imageRef.id, err)
return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id)
}
for _, name := range names {
bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name)
if err != nil {
return -1, fmt.Errorf("error reading data blob size %q for %q: %v", name, s.imageRef.id, err)
return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id)
}
sum += bigSize
}
@ -536,11 +536,11 @@ func (s *storageImageSource) getSize() (int64, error) {
}
if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
return -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
}
}
if layerMeta.Size < 0 {
return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
sum += layerMeta.Size
}
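Every storage hunk above applies the same mechanical rewrite: an error that used to be flattened into fmt.Errorf via %v is now handed to errors.Wrap/Wrapf, so the original cause (and a stack trace) survives the annotation. A minimal sketch of the pattern; the readLayer helper and the path are illustrative, not part of this diff:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

// readLayer is a hypothetical helper, used only to show the rewrite pattern.
func readLayer(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		// before: return nil, fmt.Errorf("error reading layer %q: %v", path, err)
		// after:  keep the cause and record a stack trace at the wrap site.
		return nil, errors.Wrapf(err, "error reading layer %q", path)
	}
	return data, nil
}

func main() {
	if _, err := readLayer("/nonexistent/layer"); err != nil {
		fmt.Println(err)                         // wrapped message chain
		fmt.Println("cause:", errors.Cause(err)) // the original *os.PathError
	}
}
```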
3 vendor/github.com/containers/image/storage/storage_transport.go generated vendored
@ -1,11 +1,12 @@
package storage

import (
"errors"
"path/filepath"
"regexp"
"strings"

"github.com/pkg/errors"

"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
5 vendor/github.com/containers/image/transports/transports.go generated vendored
@ -11,6 +11,7 @@ import (
"github.com/containers/image/openshift"
"github.com/containers/image/storage"
"github.com/containers/image/types"
"github.com/pkg/errors"
)

// KnownTransports is a registry of known ImageTransport instances.
@ -40,11 +41,11 @@ func init() {
func ParseImageName(imgName string) (types.ImageReference, error) {
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport, ok := KnownTransports[parts[0]]
if !ok {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
}
return transport.ParseReference(parts[1])
}
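ParseImageName above simply splits the image name on the first colon and looks the prefix up in KnownTransports. A standalone sketch of that lookup under an assumed registry map; knownTransports here is illustrative, not the real table:

```go
package main

import (
	"fmt"
	"strings"
)

// knownTransports stands in for transports.KnownTransports in this sketch.
var knownTransports = map[string]bool{"docker": true, "dir": true, "oci": true}

// splitImageName restates the parsing convention: "transport:reference".
func splitImageName(imgName string) (transport, reference string, err error) {
	parts := strings.SplitN(imgName, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf(`invalid image name %q, expected transport:reference`, imgName)
	}
	if !knownTransports[parts[0]] {
		return "", "", fmt.Errorf(`invalid image name %q, unknown transport %q`, imgName, parts[0])
	}
	return parts[0], parts[1], nil
}

func main() {
	t, r, err := splitImageName("docker://busybox:latest")
	fmt.Println(t, r, err) // docker //busybox:latest <nil>
}
```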
3 vendor/github.com/containers/image/types/types.go generated vendored
@ -1,10 +1,11 @@
package types

import (
"errors"
"io"
"time"

"github.com/pkg/errors"

"github.com/containers/image/docker/reference"
"github.com/docker/distribution/digest"
)
24 vendor/github.com/pkg/errors/.gitignore generated vendored Normal file
@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
11 vendor/github.com/pkg/errors/.travis.yml generated vendored Normal file
@ -0,0 +1,11 @@
language: go
go_import_path: github.com/pkg/errors
go:
  - 1.4.3
  - 1.5.4
  - 1.6.3
  - 1.7.3
  - tip

script:
  - go test -v ./...
23 vendor/github.com/pkg/errors/LICENSE generated vendored Normal file
@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 vendor/github.com/pkg/errors/README.md generated vendored Normal file
@ -0,0 +1,52 @@
# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors)

Package errors provides simple error handling primitives.

`go get github.com/pkg/errors`

The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
        return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.

## Adding context to an error

The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
        return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error

Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
        Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
        // handle specifically
default:
        // unknown error
}
```

[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).

## Contributing

We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.

Before proposing a change, please discuss your change by raising an issue.

## Licence

BSD-2-Clause
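Putting the two README fragments together, a complete round trip might look like the sketch below; the config path is arbitrary, and the *os.PathError assertion assumes ioutil.ReadFile failed on a missing file:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/pkg/errors"
)

func readConfig(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "read failed")
	}
	return data, nil
}

func main() {
	_, err := readConfig("/no/such/config")
	fmt.Println(err) // "read failed: open /no/such/config: no such file or directory"
	if pathErr, ok := errors.Cause(err).(*os.PathError); ok {
		fmt.Println("failed path:", pathErr.Path) // the original *os.PathError survives wrapping
	}
	fmt.Printf("%+v\n", err) // extended format also prints the recorded stack trace
}
```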
32 vendor/github.com/pkg/errors/appveyor.yml generated vendored Normal file
@ -0,0 +1,32 @@
version: build-{build}.{branch}

clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed

environment:
  GOPATH: C:\gopath

platform:
  - x64

# http://www.appveyor.com/docs/installed-software
install:
  # some helpful output for debugging builds
  - go version
  - go env
  # pre-installed MinGW at C:\MinGW is 32bit only
  # but MSYS2 at C:\msys64 has mingw64
  - set PATH=C:\msys64\mingw64\bin;%PATH%
  - gcc --version
  - g++ --version

build_script:
  - go install -v ./...

test_script:
  - set PATH=C:\gopath\bin;%PATH%
  - go test -v ./...

#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off
269 vendor/github.com/pkg/errors/errors.go generated vendored Normal file
@ -0,0 +1,269 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
//     if err != nil {
//             return err
//     }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
//     _, err := ioutil.ReadAll(r)
//     if err != nil {
//             return errors.Wrap(err, "read failed")
//     }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and an a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
//     type causer interface {
//             Cause() error
//     }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
//     switch err := errors.Cause(err).(type) {
//     case *MyError:
//             // handle specifically
//     default:
//             // unknown error
//     }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
//     %s    print the error. If the error has a Cause it will be
//           printed recursively
//     %v    see %s
//     %+v   extended format. Each Frame of the error's StackTrace will
//           be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
//     type stackTracer interface {
//             StackTrace() errors.StackTrace
//     }
//
// Where errors.StackTrace is defined as
//
//     type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
//     if err, ok := err.(stackTracer); ok {
//             for _, f := range err.StackTrace() {
//                     fmt.Printf("%+s:%d", f)
//             }
//     }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors

import (
	"fmt"
	"io"
)

// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
	return &fundamental{
		msg:   message,
		stack: callers(),
	}
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	return &fundamental{
		msg:   fmt.Sprintf(format, args...),
		stack: callers(),
	}
}

// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
	msg string
	*stack
}

func (f *fundamental) Error() string { return f.msg }

func (f *fundamental) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			io.WriteString(s, f.msg)
			f.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, f.msg)
	case 'q':
		fmt.Fprintf(s, "%q", f.msg)
	}
}

// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
	if err == nil {
		return nil
	}
	return &withStack{
		err,
		callers(),
	}
}

type withStack struct {
	error
	*stack
}

func (w *withStack) Cause() error { return w.error }

func (w *withStack) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", w.Cause())
			w.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, w.Error())
	case 'q':
		fmt.Fprintf(s, "%q", w.Error())
	}
}

// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
	if err == nil {
		return nil
	}
	err = &withMessage{
		cause: err,
		msg:   message,
	}
	return &withStack{
		err,
		callers(),
	}
}

// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is call, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	err = &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
	return &withStack{
		err,
		callers(),
	}
}

// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   message,
	}
}

type withMessage struct {
	cause error
	msg   string
}

func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error  { return w.cause }

func (w *withMessage) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v\n", w.Cause())
			io.WriteString(s, w.msg)
			return
		}
		fallthrough
	case 's', 'q':
		io.WriteString(s, w.Error())
	}
}

// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
//     type causer interface {
//            Cause() error
//     }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
	type causer interface {
		Cause() error
	}

	for err != nil {
		cause, ok := err.(causer)
		if !ok {
			break
		}
		err = cause.Cause()
	}
	return err
}
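The package documentation above describes the unexported stackTracer interface but ships no runnable demonstration; here is a small sketch of retrieving frames from an error created by errors.New, declaring the interface locally as the docs suggest:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the unexported interface described in the package docs.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("whoops")
	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			// %+s prints the function and file, %d the line of this frame.
			fmt.Printf("%+s:%d\n", f, f)
		}
	}
}
```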
178 vendor/github.com/pkg/errors/stack.go generated vendored Normal file
@ -0,0 +1,178 @@
package errors

import (
	"fmt"
	"io"
	"path"
	"runtime"
	"strings"
)

// Frame represents a program counter inside a stack frame.
type Frame uintptr

// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }

// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return "unknown"
	}
	file, _ := fn.FileLine(f.pc())
	return file
}

// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return 0
	}
	_, line := fn.FileLine(f.pc())
	return line
}

// Format formats the frame according to the fmt.Formatter interface.
//
//    %s    source file
//    %d    source line
//    %n    function name
//    %v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+s   path of source file relative to the compile time GOPATH
//    %+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		switch {
		case s.Flag('+'):
			pc := f.pc()
			fn := runtime.FuncForPC(pc)
			if fn == nil {
				io.WriteString(s, "unknown")
			} else {
				file, _ := fn.FileLine(pc)
				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
			}
		default:
			io.WriteString(s, path.Base(f.file()))
		}
	case 'd':
		fmt.Fprintf(s, "%d", f.line())
	case 'n':
		name := runtime.FuncForPC(f.pc()).Name()
		io.WriteString(s, funcname(name))
	case 'v':
		f.Format(s, 's')
		io.WriteString(s, ":")
		f.Format(s, 'd')
	}
}

// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

func (st StackTrace) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case s.Flag('+'):
			for _, f := range st {
				fmt.Fprintf(s, "\n%+v", f)
			}
		case s.Flag('#'):
			fmt.Fprintf(s, "%#v", []Frame(st))
		default:
			fmt.Fprintf(s, "%v", []Frame(st))
		}
	case 's':
		fmt.Fprintf(s, "%s", []Frame(st))
	}
}

// stack represents a stack of program counters.
type stack []uintptr

func (s *stack) Format(st fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case st.Flag('+'):
			for _, pc := range *s {
				f := Frame(pc)
				fmt.Fprintf(st, "\n%+v", f)
			}
		}
	}
}

func (s *stack) StackTrace() StackTrace {
	f := make([]Frame, len(*s))
	for i := 0; i < len(f); i++ {
		f[i] = Frame((*s)[i])
	}
	return f
}

func callers() *stack {
	const depth = 32
	var pcs [depth]uintptr
	n := runtime.Callers(3, pcs[:])
	var st stack = pcs[0:n]
	return &st
}

// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
	i := strings.LastIndex(name, "/")
	name = name[i+1:]
	i = strings.Index(name, ".")
	return name[i+1:]
}

func trimGOPATH(name, file string) string {
	// Here we want to get the source file path relative to the compile time
	// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
	// GOPATH at runtime, but we can infer the number of path segments in the
	// GOPATH. We note that fn.Name() returns the function name qualified by
	// the import path, which does not include the GOPATH. Thus we can trim
	// segments from the beginning of the file path until the number of path
	// separators remaining is one more than the number of path separators in
	// the function name. For example, given:
	//
	// GOPATH /home/user
	// file /home/user/src/pkg/sub/file.go
	// fn.Name() pkg/sub.Type.Method
	//
	// We want to produce:
	//
	// pkg/sub/file.go
	//
	// From this we can easily see that fn.Name() has one less path separator
	// than our desired output. We count separators from the end of the file
	// path until it finds two more than in the function name and then move
	// one character forward to preserve the initial path segment without a
	// leading separator.
	const sep = "/"
	goal := strings.Count(name, sep) + 2
	i := len(file)
	for n := 0; n < goal; n++ {
		i = strings.LastIndex(file[:i], sep)
		if i == -1 {
			// not enough separators found, set i so that the slice expression
			// below leaves file unmodified
			i = -len(sep)
			break
		}
	}
	// get back to 0 or trim the leading separator
	file = file[i+len(sep):]
	return file
}
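For intuition about the helpers at the bottom of stack.go, here is a standalone restatement of funcname and trimGOPATH applied to the worked example from the trimGOPATH comment; the qualified names are illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// funcname, restated outside package errors: drop the import-path prefix and
// the package qualifier from a runtime-reported function name.
func funcname(name string) string {
	i := strings.LastIndex(name, "/")
	name = name[i+1:]
	i = strings.Index(name, ".")
	return name[i+1:]
}

// trimGOPATH, restated: trim the file path until it has exactly one more
// separator than the qualified function name, as the comment describes.
func trimGOPATH(name, file string) string {
	const sep = "/"
	goal := strings.Count(name, sep) + 2
	i := len(file)
	for n := 0; n < goal; n++ {
		i = strings.LastIndex(file[:i], sep)
		if i == -1 {
			i = -len(sep)
			break
		}
	}
	return file[i+len(sep):]
}

func main() {
	fmt.Println(funcname("github.com/pkg/errors.Wrap"))                              // Wrap
	fmt.Println(trimGOPATH("pkg/sub.Type.Method", "/home/user/src/pkg/sub/file.go")) // pkg/sub/file.go
}
```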