Mirror of https://github.com/containers/skopeo.git, synced 2025-07-07 11:49:18 +00:00
Bump github.com/containers/image/v5 from 5.5.2 to 5.6.0
Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.5.2 to 5.6.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.5.2...v5.6.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
parent 12ab19f5fd
commit 1d0b1671f8
go.mod (2 changes)
@@ -4,7 +4,7 @@ go 1.12
 
 require (
 	github.com/containers/common v0.22.0
-	github.com/containers/image/v5 v5.5.2
+	github.com/containers/image/v5 v5.6.0
 	github.com/containers/ocicrypt v1.0.3
 	github.com/containers/storage v1.23.5
 	github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f
go.sum (12 changes)
@@ -44,6 +44,8 @@ github.com/containers/common v0.22.0 h1:MjJIMka4pJddHsfZpQCF7jOmX6vXqMs0ojDeYmPK
 github.com/containers/common v0.22.0/go.mod h1:qsLcLHM7ha5Nc+JDp5duBwfwEfrnlfjXL/K8HO96QHw=
 github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
 github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
+github.com/containers/image/v5 v5.6.0 h1:r4AqIX4NO/X7OJkqX574zITV3fq0ZPn0pSlLsxWF6ww=
+github.com/containers/image/v5 v5.6.0/go.mod h1:iUSWo3SOLqJo0CkZkKrHxqR6YWqrT98mkXFpE0MceE8=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
@@ -140,6 +142,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
 github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -298,12 +302,16 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
 github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
 github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
 github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
+github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
 github.com/vbauerster/mpb/v5 v5.2.2 h1:zIICVOm+XD+uV6crpSORaL6I0Q1WqOdvxZTp+r3L9cw=
 github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww=
+github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
+github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
@@ -320,6 +328,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
 go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
@@ -386,6 +396,8 @@ golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
 golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
+golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
vendor/github.com/containers/image/v5/copy/copy.go (4 changes; generated, vendored)
@@ -377,7 +377,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
 	if len(sigs) != 0 {
 		c.Printf("Checking if image list destination supports signatures\n")
 		if err := c.dest.SupportsSignatures(ctx); err != nil {
-			return nil, "", errors.Wrap(err, "Can not copy signatures")
+			return nil, "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
 		}
 	}
 	canModifyManifestList := (len(sigs) == 0)
@@ -595,7 +595,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	if len(sigs) != 0 {
 		c.Printf("Checking if image destination supports signatures\n")
 		if err := c.dest.SupportsSignatures(ctx); err != nil {
-			return nil, "", "", errors.Wrap(err, "Can not copy signatures")
+			return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
 		}
 	}
 
vendor/github.com/containers/image/v5/docker/archive/dest.go (50 changes; generated, vendored)
@@ -3,9 +3,8 @@ package archive
 import (
 	"context"
 	"io"
-	"os"
 
-	"github.com/containers/image/v5/docker/tarfile"
+	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/types"
 	"github.com/pkg/errors"
 )
@@ -13,37 +12,38 @@ import (
 type archiveImageDestination struct {
 	*tarfile.Destination // Implements most of types.ImageDestination
-	ref    archiveReference
-	writer io.Closer
+	ref     archiveReference
+	archive *tarfile.Writer // Should only be closed if writer != nil
+	writer  io.Closer       // May be nil if the archive is shared
 }
 
 func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
-	// ref.path can be either a pipe or a regular file
-	// in the case of a pipe, we require that we can open it for write
-	// in the case of a regular file, we don't want to overwrite any pre-existing file
-	// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
-	// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
-	fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error opening file %q", ref.path)
+	if ref.sourceIndex != -1 {
+		return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
 	}
 
-	fhStat, err := fh.Stat()
-	if err != nil {
-		return nil, errors.Wrapf(err, "error statting file %q", ref.path)
-	}
+	var archive *tarfile.Writer
+	var writer io.Closer
+	if ref.archiveWriter != nil {
+		archive = ref.archiveWriter
+		writer = nil
+	} else {
+		fh, err := openArchiveForWriting(ref.path)
+		if err != nil {
+			return nil, err
+		}
 
-	if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
-		return nil, errors.New("docker-archive doesn't support modifying existing images")
+		archive = tarfile.NewWriter(fh)
+		writer = fh
 	}
-
-	tarDest := tarfile.NewDestinationWithContext(sys, fh, ref.destinationRef)
+	tarDest := tarfile.NewDestination(sys, archive, ref.ref)
 	if sys != nil && sys.DockerArchiveAdditionalTags != nil {
 		tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
 	}
 	return &archiveImageDestination{
 		Destination: tarDest,
 		ref:         ref,
-		writer:      fh,
+		archive:     archive,
+		writer:      writer,
 	}, nil
 }
@@ -60,7 +60,10 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
 
 // Close removes resources associated with an initialized ImageDestination, if any.
 func (d *archiveImageDestination) Close() error {
-	return d.writer.Close()
+	if d.writer != nil {
+		return d.writer.Close()
+	}
+	return nil
 }
 
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
@@ -68,5 +71,8 @@ func (d *archiveImageDestination) Close() error {
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
-	return d.Destination.Commit(ctx)
+	if d.writer != nil {
+		return d.archive.Close()
+	}
+	return nil
 }
vendor/github.com/containers/image/v5/docker/archive/reader.go (new file, 120 lines; generated, vendored)
@@ -0,0 +1,120 @@
package archive

import (
	"github.com/containers/image/v5/docker/internal/tarfile"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

// Reader manages a single Docker archive, allows listing its contents and accessing
// individual images with less overhead than creating image references individually
// (because the archive is, if necessary, copied or decompressed only once).
type Reader struct {
	path    string // The original, user-specified path; not the maintained temporary file, if any
	archive *tarfile.Reader
}

// NewReader returns a Reader for path.
// The caller should call .Close() on the returned object.
func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
	archive, err := tarfile.NewReaderFromFile(sys, path)
	if err != nil {
		return nil, err
	}
	return &Reader{
		path:    path,
		archive: archive,
	}, nil
}

// Close deletes temporary files associated with the Reader, if any.
func (r *Reader) Close() error {
	return r.archive.Close()
}

// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
// and a variant of imageReference that points at the same image within the reader.
// The caller should call .Close() on the returned Reader.
func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
	standalone, ok := ref.(archiveReference)
	if !ok {
		return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
	}
	if standalone.archiveReader != nil {
		return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
	}
	reader, err := NewReader(sys, standalone.path)
	if err != nil {
		return nil, nil, err
	}
	succeeded := false
	defer func() {
		if !succeeded {
			reader.Close()
		}
	}()
	readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
	if err != nil {
		return nil, nil, err
	}
	succeeded = true
	return reader, readerRef, nil
}

// List returns a set of references for images in the Reader,
// grouped by the image the references point to.
// The references are valid only until the Reader is closed.
func (r *Reader) List() ([][]types.ImageReference, error) {
	res := [][]types.ImageReference{}
	for imageIndex, image := range r.archive.Manifest {
		refs := []types.ImageReference{}
		for _, tag := range image.RepoTags {
			parsedTag, err := reference.ParseNormalizedNamed(tag)
			if err != nil {
				return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
			}
			nt, ok := parsedTag.(reference.NamedTagged)
			if !ok {
				return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
			}
			ref, err := newReference(r.path, nt, -1, r.archive, nil)
			if err != nil {
				return nil, errors.Wrapf(err, "Error creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
			}
			refs = append(refs, ref)
		}
		if len(refs) == 0 {
			ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
			if err != nil {
				return nil, errors.Wrapf(err, "Error creating a reference for manifest item @%d", imageIndex)
			}
			refs = append(refs, ref)
		}
		res = append(res, refs)
	}
	return res, nil
}

// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings
// (i.e. exposing the short names before normalization).
// The function reports an error if ref does not identify a single image.
// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned;
// if ref contains a source index, or neither a NamedTagged nor a source index, all tags
// matching the image are returned.
// Almost all users should use List() or ImageReference.DockerReference() instead.
func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
	archiveRef, ok := ref.(archiveReference)
	if !ok {
		return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
	}
	manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
	if err != nil {
		return nil, err
	}
	if tagIndex != -1 {
		return []string{manifestItem.RepoTags[tagIndex]}, nil
	}
	return manifestItem.RepoTags, nil
}
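The Reader type above is the user-facing half of this refactoring: it opens (and, when needed, decompresses) the archive once and then hands out per-image references. A minimal sketch of how a caller might enumerate a multi-image archive with it; the path is hypothetical and errors are handled with panic only for brevity:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	reader, err := archive.NewReader(nil, "/tmp/images.tar") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer reader.Close() // removes the decompressed temporary copy, if one was made

	images, err := reader.List() // one slice of references per image in manifest.json
	if err != nil {
		panic(err)
	}
	for i, refs := range images {
		for _, ref := range refs {
			fmt.Printf("image %d: %s\n", i, transports.ImageName(ref))
		}
	}
}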
vendor/github.com/containers/image/v5/docker/archive/src.go (22 changes; generated, vendored)
@@ -3,9 +3,8 @@ package archive
 import (
 	"context"
 
-	"github.com/containers/image/v5/docker/tarfile"
+	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/types"
-	"github.com/sirupsen/logrus"
 )
 
 type archiveImageSource struct {
@@ -16,13 +15,20 @@ type archiveImageSource struct {
 // newImageSource returns a types.ImageSource for the specified image reference.
 // The caller must call .Close() on the returned ImageSource.
 func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
-	if ref.destinationRef != nil {
-		logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
-	}
-	src, err := tarfile.NewSourceFromFileWithContext(sys, ref.path)
-	if err != nil {
-		return nil, err
+	var archive *tarfile.Reader
+	var closeArchive bool
+	if ref.archiveReader != nil {
+		archive = ref.archiveReader
+		closeArchive = false
+	} else {
+		a, err := tarfile.NewReaderFromFile(sys, ref.path)
+		if err != nil {
+			return nil, err
+		}
+		archive = a
+		closeArchive = true
 	}
+	src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
 	return &archiveImageSource{
 		Source: src,
 		ref:    ref,
vendor/github.com/containers/image/v5/docker/archive/transport.go (95 changes; generated, vendored)
@@ -3,8 +3,10 @@ package archive
 import (
 	"context"
 	"fmt"
+	"strconv"
 	"strings"
 
+	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/docker/reference"
 	ctrImage "github.com/containers/image/v5/image"
 	"github.com/containers/image/v5/transports"
@@ -42,9 +44,16 @@ func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
 // archiveReference is an ImageReference for Docker images.
 type archiveReference struct {
 	path string
-	// only used for destinations,
-	// archiveReference.destinationRef is optional and can be nil for destinations as well.
-	destinationRef reference.NamedTagged
+	// May be nil to read the only image in an archive, or to create an untagged image.
+	ref reference.NamedTagged
+	// If not -1, a zero-based index of the image in the manifest. Valid only for sources.
+	// Must not be set if ref is set.
+	sourceIndex int
+	// If not nil, must have been created from path (but archiveReader.path may point at a temporary
+	// file, not necessarily path precisely).
+	archiveReader *tarfile.Reader
+	// If not nil, must have been created for path
+	archiveWriter *tarfile.Writer
 }
@@ -55,37 +64,69 @@ func ParseReference(refString string) (types.ImageReference, error) {
 
 	parts := strings.SplitN(refString, ":", 2)
 	path := parts[0]
-	var destinationRef reference.NamedTagged
+	var nt reference.NamedTagged
+	sourceIndex := -1
 
-	// A :tag was specified, which is only necessary for destinations.
 	if len(parts) == 2 {
-		ref, err := reference.ParseNormalizedNamed(parts[1])
-		if err != nil {
-			return nil, errors.Wrapf(err, "docker-archive parsing reference")
+		// A :tag or :@index was specified.
+		if len(parts[1]) > 0 && parts[1][0] == '@' {
+			i, err := strconv.Atoi(parts[1][1:])
+			if err != nil {
+				return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
+			}
+			if i < 0 {
+				return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
+			}
+			sourceIndex = i
+		} else {
+			ref, err := reference.ParseNormalizedNamed(parts[1])
+			if err != nil {
+				return nil, errors.Wrapf(err, "docker-archive parsing reference")
+			}
+			ref = reference.TagNameOnly(ref)
+			refTagged, isTagged := ref.(reference.NamedTagged)
+			if !isTagged { // If ref contains a digest, TagNameOnly does not change it
+				return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
+			}
+			nt = refTagged
 		}
-		ref = reference.TagNameOnly(ref)
-		refTagged, isTagged := ref.(reference.NamedTagged)
-		if !isTagged {
-			// Really shouldn't be hit...
-			return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
-		}
-		destinationRef = refTagged
 	}
 
-	return NewReference(path, destinationRef)
+	return newReference(path, nt, sourceIndex, nil, nil)
 }
 
-// NewReference returns a Docker archive reference for a path and an optional destination reference.
-func NewReference(path string, destinationRef reference.NamedTagged) (types.ImageReference, error) {
+// NewReference returns a Docker archive reference for a path and an optional reference.
+func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) {
+	return newReference(path, ref, -1, nil, nil)
+}
+
+// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
+func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
+	return newReference(path, nil, sourceIndex, nil, nil)
+}
+
+// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
+// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
+func newReference(path string, ref reference.NamedTagged, sourceIndex int,
+	archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
 	if strings.Contains(path, ":") {
 		return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
 	}
-	if _, isDigest := destinationRef.(reference.Canonical); isDigest {
-		return nil, errors.Errorf("docker-archive doesn't support digest references: %s", destinationRef.String())
+	if ref != nil && sourceIndex != -1 {
+		return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
+	}
+	if _, isDigest := ref.(reference.Canonical); isDigest {
+		return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
+	}
+	if sourceIndex != -1 && sourceIndex < 0 {
+		return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
 	}
 	return archiveReference{
-		path:           path,
-		destinationRef: destinationRef,
+		path:          path,
+		ref:           ref,
+		sourceIndex:   sourceIndex,
+		archiveReader: archiveReader,
+		archiveWriter: archiveWriter,
 	}, nil
 }
@@ -99,17 +140,21 @@ func (ref archiveReference) Transport() types.ImageTransport {
 // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
 // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
 func (ref archiveReference) StringWithinTransport() string {
-	if ref.destinationRef == nil {
+	switch {
+	case ref.ref != nil:
+		return fmt.Sprintf("%s:%s", ref.path, ref.ref.String())
+	case ref.sourceIndex != -1:
+		return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex)
+	default:
 		return ref.path
 	}
-	return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
 }
 
 // DockerReference returns a Docker reference associated with this reference
 // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
 // not e.g. after redirect or alias processing), or nil if unknown/not applicable.
 func (ref archiveReference) DockerReference() reference.Named {
-	return ref.destinationRef
+	return ref.ref
}
 
 // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
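ParseReference now accepts a second form, path:@index, alongside the existing path:name:tag. A small illustrative sketch of both forms (the paths are hypothetical and not part of the commit):

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	// Tagged form, unchanged behavior: the name is normalized as usual.
	tagged, err := archive.ParseReference("/tmp/images.tar:busybox:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(transports.ImageName(tagged))
	// docker-archive:/tmp/images.tar:docker.io/library/busybox:latest

	// New zero-based index form, usable for untagged images in multi-image archives.
	indexed, err := archive.ParseReference("/tmp/images.tar:@0")
	if err != nil {
		panic(err)
	}
	fmt.Println(transports.ImageName(indexed))
	// docker-archive:/tmp/images.tar:@0
}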
vendor/github.com/containers/image/v5/docker/archive/writer.go (new file, 82 lines; generated, vendored)
@@ -0,0 +1,82 @@
package archive

import (
	"io"
	"os"

	"github.com/containers/image/v5/docker/internal/tarfile"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
	path    string // The original, user-specified path; not the maintained temporary file, if any
	archive *tarfile.Writer
	writer  io.Closer
}

// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
	fh, err := openArchiveForWriting(path)
	if err != nil {
		return nil, err
	}
	archive := tarfile.NewWriter(fh)

	return &Writer{
		path:    path,
		archive: archive,
		writer:  fh,
	}, nil
}

// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
func (w *Writer) Close() error {
	err := w.archive.Close()
	if err2 := w.writer.Close(); err2 != nil && err == nil {
		err = err2
	}
	return err
}

// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
	return newReference(w.path, destinationRef, -1, nil, w.archive)
}

// openArchiveForWriting opens path for writing a tar archive,
// making a few sanity checks.
func openArchiveForWriting(path string) (*os.File, error) {
	// path can be either a pipe or a regular file
	// in the case of a pipe, we require that we can open it for write
	// in the case of a regular file, we don't want to overwrite any pre-existing file
	// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
	// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, errors.Wrapf(err, "error opening file %q", path)
	}
	succeeded := false
	defer func() {
		if !succeeded {
			fh.Close()
		}
	}()
	fhStat, err := fh.Stat()
	if err != nil {
		return nil, errors.Wrapf(err, "error statting file %q", path)
	}

	if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
		return nil, errors.New("docker-archive doesn't support modifying existing images")
	}

	succeeded = true
	return fh, nil
}
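Writer is what makes multi-image archives possible: every reference minted by NewReference appends to the same tar stream instead of re-opening and truncating the path. A hedged usage sketch, with a hypothetical output path and image name; in real use each returned reference would be passed to copy.Image, once per image:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// One Writer per output archive.
	w, err := archive.NewWriter(nil, "/tmp/out.tar") // hypothetical output path
	if err != nil {
		panic(err)
	}
	defer w.Close() // finalizes manifest.json and the legacy metadata

	named, err := reference.ParseNormalizedNamed("example.com/app:v1") // hypothetical name
	if err != nil {
		panic(err)
	}
	destRef, err := w.NewReference(named.(reference.NamedTagged))
	if err != nil {
		panic(err)
	}
	// destRef would be the destination argument of copy.Image(...).
	fmt.Println(destRef.StringWithinTransport())
}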
vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go (9 changes; generated, vendored)
@@ -4,8 +4,8 @@ import (
 	"context"
 	"io"
 
+	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/docker/reference"
-	"github.com/containers/image/v5/docker/tarfile"
 	"github.com/containers/image/v5/types"
 	"github.com/docker/docker/client"
 	"github.com/pkg/errors"
@@ -16,6 +16,7 @@ type daemonImageDestination struct {
 	ref                  daemonReference
 	mustMatchRuntimeOS   bool
 	*tarfile.Destination // Implements most of types.ImageDestination
+	archive              *tarfile.Writer
 	// For talking to imageLoadGoroutine
 	goroutineCancel context.CancelFunc
 	statusChannel   <-chan error
@@ -45,6 +46,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
 	}
 
 	reader, writer := io.Pipe()
+	archive := tarfile.NewWriter(writer)
 	// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
 	statusChannel := make(chan error, 1)
 
@@ -54,7 +56,8 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
 	return &daemonImageDestination{
 		ref:                ref,
 		mustMatchRuntimeOS: mustMatchRuntimeOS,
-		Destination:        tarfile.NewDestinationWithContext(sys, writer, namedTaggedRef),
+		Destination:        tarfile.NewDestination(sys, archive, namedTaggedRef),
+		archive:            archive,
 		goroutineCancel:    goroutineCancel,
 		statusChannel:      statusChannel,
 		writer:             writer,
@@ -130,7 +133,7 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
 	logrus.Debugf("docker-daemon: Closing tar stream")
-	if err := d.Destination.Commit(ctx); err != nil {
+	if err := d.archive.Close(); err != nil {
 		return err
 	}
 	if err := d.writer.Close(); err != nil {
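The daemon destination keeps its existing shape: a tarfile writer feeding one end of an io.Pipe while the other end streams to the daemon's load endpoint; only the tar formatting now goes through tarfile.NewWriter. A self-contained sketch of that pipe pattern, where io.Copy stands in for the daemon client call and a single hand-rolled tar entry stands in for the image data:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	reader, writer := io.Pipe()
	go func() {
		// The writing side: in daemon_dest.go this is the tarfile.Writer
		// that PutBlob/PutManifest feed.
		tw := tar.NewWriter(writer)
		hdr := &tar.Header{Name: "hello.txt", Mode: 0600, Size: 5}
		if err := tw.WriteHeader(hdr); err != nil {
			writer.CloseWithError(err)
			return
		}
		if _, err := tw.Write([]byte("hello")); err != nil {
			writer.CloseWithError(err)
			return
		}
		tw.Close()     // corresponds to d.archive.Close() in Commit()
		writer.Close() // corresponds to d.writer.Close()
	}()
	// The reading side: stands in for the client.ImageLoad call that
	// consumes the stream in imageLoadGoroutine.
	n, err := io.Copy(ioutil.Discard, reader)
	if err != nil {
		panic(err)
	}
	fmt.Println("streamed", n, "bytes")
}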
vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go (5 changes; generated, vendored)
@@ -3,7 +3,7 @@ package daemon
 import (
 	"context"
 
-	"github.com/containers/image/v5/docker/tarfile"
+	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/types"
 	"github.com/pkg/errors"
 )
@@ -35,10 +35,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
 	}
 	defer inputStream.Close()
 
-	src, err := tarfile.NewSourceFromStreamWithSystemContext(sys, inputStream)
+	archive, err := tarfile.NewReaderFromStream(sys, inputStream)
 	if err != nil {
 		return nil, err
 	}
+	src := tarfile.NewSource(archive, true, nil, -1)
 	return &daemonImageSource{
 		ref:    ref,
 		Source: src,
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (15 changes; generated, vendored)
@@ -22,7 +22,6 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/docker/distribution/registry/api/errcode"
 	v2 "github.com/docker/distribution/registry/api/v2"
-	"github.com/docker/distribution/registry/client"
 	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -154,7 +153,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusAccepted {
 		logrus.Debugf("Error initiating layer upload, response %#v", *res)
-		return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
+		return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
 	}
 	uploadLocation, err := res.Location()
 	if err != nil {
@@ -175,7 +174,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	}
 	defer res.Body.Close()
 	if !successStatus(res.StatusCode) {
-		return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer chunked")
+		return nil, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer chunked")
 	}
 	uploadLocation, err := res.Location()
 	if err != nil {
@@ -201,7 +200,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusCreated {
 		logrus.Debugf("Error uploading layer, response %#v", *res)
-		return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
+		return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer to %s", uploadLocation)
 	}
 
 	logrus.Debugf("Upload of layer %s complete", computedDigest)
@@ -226,7 +225,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
 		return true, getBlobSize(res), nil
 	case http.StatusUnauthorized:
 		logrus.Debugf("... not authorized")
-		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
+		return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
 	case http.StatusNotFound:
 		logrus.Debugf("... not present")
 		return false, -1, nil
@@ -277,7 +276,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
 	default:
 		logrus.Debugf("Error mounting, response %#v", *res)
-		return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+		return errors.Wrapf(registryHTTPResponseToError(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
 	}
 }
 
@@ -414,7 +413,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
 	}
 	defer res.Body.Close()
 	if !successStatus(res.StatusCode) {
-		err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
+		err = errors.Wrapf(registryHTTPResponseToError(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
 		if isManifestInvalidError(errors.Cause(err)) {
 			err = types.ManifestTypeRejectedError{Err: err}
 		}
@@ -641,7 +640,7 @@ sigExists:
 			logrus.Debugf("Error body %s", string(body))
 		}
 		logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
-		return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
+		return errors.Wrapf(registryHTTPResponseToError(res), "Error uploading signature to %s in %s", path, d.c.registry)
 	}
 }
vendor/github.com/containers/image/v5/docker/docker_image_src.go (6 changes; generated, vendored)
@@ -17,7 +17,6 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
-	"github.com/docker/distribution/registry/client"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -193,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
 	logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusOK {
-		return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
+		return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
 	}
 
 	manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@@ -235,6 +234,9 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
 		resp *http.Response
 		err  error
 	)
+	if len(urls) == 0 {
+		return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
+	}
 	for _, url := range urls {
 		resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
 		if err == nil {
vendor/github.com/containers/image/v5/docker/errors.go (14 changes; generated, vendored)
@@ -44,3 +44,17 @@ func httpResponseToError(res *http.Response, context string) error {
 		return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
 	}
 }
+
+// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
+// registry.
+func registryHTTPResponseToError(res *http.Response) error {
+	errResponse := client.HandleErrorResponse(res)
+	if e, ok := perrors.Cause(errResponse).(*client.UnexpectedHTTPResponseError); ok {
+		response := string(e.Response)
+		if len(response) > 50 {
+			response = response[:50] + "..."
+		}
+		errResponse = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
+	}
+	return errResponse
+}
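The helper's truncation rule keeps oversized, unstructured registry bodies (proxy error pages and the like) out of user-visible messages. A tiny standalone sketch of just that rule, with a made-up body:

package main

import "fmt"

func main() {
	// A made-up unstructured body, e.g. an HTML page from a proxy in front
	// of the registry.
	response := "<html><head><title>502 Bad Gateway</title></head><body>upstream error</body></html>"
	if len(response) > 50 {
		response = response[:50] + "..."
	}
	fmt.Printf("StatusCode: %d, %s\n", 502, response)
	// StatusCode: 502, <html><head><title>502 Bad Gateway</title></head><...
}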
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (new file, 217 lines; generated, vendored)
@@ -0,0 +1,217 @@
package tarfile

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"os"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
	"github.com/containers/image/v5/internal/tmpdir"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
	archive  *Writer
	repoTags []reference.NamedTagged
	// Other state.
	config []byte
	sysCtx *types.SystemContext
}

// NewDestination returns a tarfile.Destination adding images to the specified Writer.
func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
	repoTags := []reference.NamedTagged{}
	if ref != nil {
		repoTags = append(repoTags, ref)
	}
	return &Destination{
		archive:  archive,
		repoTags: repoTags,
		sysCtx:   sys,
	}
}

// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
	d.repoTags = append(d.repoTags, tags...)
}

// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
// If an empty slice or nil is returned, then any MIME type can be tried to upload.
func (d *Destination) SupportedManifestMIMETypes() []string {
	return []string{
		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
	}
}

// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
	return errors.Errorf("Storing signatures for docker tar files is not supported")
}

// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
	return false
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
	return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
	// The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
	// this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
	// much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
	return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	// Ouch, we need to stream the blob into a temporary file just to determine the size.
	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
		if err != nil {
			return types.BlobInfo{}, err
		}
		defer os.Remove(streamCopy.Name())
		defer streamCopy.Close()

		digester := digest.Canonical.Digester()
		tee := io.TeeReader(stream, digester.Hash())
		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
		size, err := io.Copy(streamCopy, tee)
		if err != nil {
			return types.BlobInfo{}, err
		}
		_, err = streamCopy.Seek(0, io.SeekStart)
		if err != nil {
			return types.BlobInfo{}, err
		}
		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
		if inputInfo.Digest == "" {
			inputInfo.Digest = digester.Digest()
		}
		stream = streamCopy
		logrus.Debugf("... streaming done")
	}

	if err := d.archive.lock(); err != nil {
		return types.BlobInfo{}, err
	}
	defer d.archive.unlock()

	// Maybe the blob has been already sent
	ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if ok {
		return reusedInfo, nil
	}

	if isConfig {
		buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
		if err != nil {
			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
		}
		d.config = buf
		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
			return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
		}
	} else {
		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
			return types.BlobInfo{}, err
		}
	}
	d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}

// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
	if err := d.archive.lock(); err != nil {
		return false, types.BlobInfo{}, err
	}
	defer d.archive.unlock()

	return d.archive.tryReusingBlobLocked(info)
}

// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
	if instanceDigest != nil {
		return errors.New(`Manifest lists are not supported for docker tar files`)
	}
	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
	// so the caller trying a different manifest kind would be pointless.
	var man manifest.Schema2
	if err := json.Unmarshal(m, &man); err != nil {
		return errors.Wrap(err, "Error parsing manifest")
	}
	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
	}

	if err := d.archive.lock(); err != nil {
		return err
	}
	defer d.archive.unlock()

	if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
		return err
	}

	return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}

// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
	if instanceDigest != nil {
		return errors.Errorf(`Manifest lists are not supported for docker tar files`)
	}
	if len(signatures) != 0 {
		return errors.Errorf("Storing signatures for docker tar files is not supported")
	}
	return nil
}
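For blobs arriving with unknown size or digest, PutBlob tees the stream through a digester while spooling it to disk, then reads back both values. A minimal sketch of the digest-while-copying part in isolation, copying to ioutil.Discard here instead of a temporary file:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Stand-in for a layer stream of unknown size.
	stream := strings.NewReader("example blob contents")

	// Tee the stream through a digester while copying, as PutBlob does;
	// the real code copies to a temporary file so it can re-read the data.
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())
	size, err := io.Copy(ioutil.Discard, tee)
	if err != nil {
		panic(err)
	}
	fmt.Println(size, digester.Digest())
}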
269
vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
generated
vendored
Normal file
269
vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
||||
package tarfile
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/iolimits"
|
||||
"github.com/containers/image/v5/internal/tmpdir"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
|
||||
type Reader struct {
|
||||
// None of the fields below are modified after the archive is created, until .Close();
|
||||
// this allows concurrent readers of the same archive.
|
||||
path string // "" if the archive has already been closed.
|
||||
removeOnClose bool // Remove file on close if true
|
||||
Manifest []ManifestItem // Guaranteed to exist after the archive is created.
|
||||
}
|
||||
|
||||
// NewReaderFromFile returns a Reader for the specified path.
|
||||
// The caller should call .Close() on the returned archive when done.
|
||||
func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error opening file %q", path)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// If the file is already not compressed we can just return the file itself
|
||||
// as a source. Otherwise we pass the stream to NewReaderFromStream.
|
||||
stream, isCompressed, err := compression.AutoDecompress(file)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
|
||||
}
|
||||
defer stream.Close()
|
||||
if !isCompressed {
|
||||
return newReader(path, false)
|
||||
}
|
||||
return NewReaderFromStream(sys, stream)
|
||||
}
|
||||
|
||||
// NewReaderFromStream returns a Reader for the specified inputStream,
|
||||
// which can be either compressed or uncompressed. The caller can close the
|
||||
// inputStream immediately after NewReaderFromFile returns.
|
||||
// The caller should call .Close() on the returned archive when done.
|
||||
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
|
||||
// Save inputStream to a temporary file
|
||||
tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error creating temporary file")
|
||||
}
|
||||
defer tarCopyFile.Close()
|
||||
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
os.Remove(tarCopyFile.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
// In order to be compatible with docker-load, we need to support
|
||||
// auto-decompression (it's also a nice quality-of-life thing to avoid
|
||||
// giving users really confusing "invalid tar header" errors).
|
||||
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error auto-decompressing input")
|
||||
}
|
||||
defer uncompressedStream.Close()
|
||||
|
||||
// Copy the plain archive to the temporary file.
|
||||
//
|
||||
// TODO: This can take quite some time, and should ideally be cancellable
|
||||
// using a context.Context.
|
||||
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
|
||||
return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
|
||||
}
|
||||
succeeded = true
|
||||
|
||||
return newReader(tarCopyFile.Name(), true)
|
||||
}
|
||||
|
||||
// newReader creates a Reader for the specified path and removeOnClose flag.
|
||||
// The caller should call .Close() on the returned archive when done.
|
||||
func newReader(path string, removeOnClose bool) (*Reader, error) {
|
||||
// This is a valid enough archive, except Manifest is not yet filled.
|
||||
r := Reader{
|
||||
path: path,
|
||||
removeOnClose: removeOnClose,
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
r.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// We initialize Manifest immediately when constructing the Reader instead
|
||||
// of later on-demand because every caller will need the data, and because doing it now
|
||||
// removes the need to synchronize the access/creation of the data if the archive is later
|
||||
// used from multiple goroutines to access different images.
|
||||
|
||||
// FIXME? Do we need to deal with the legacy format?
|
||||
bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
|
||||
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
|
||||
}
|
||||
|
||||
succeeded = true
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// Close removes resources associated with an initialized Reader, if any.
|
||||
func (r *Reader) Close() error {
|
||||
path := r.path
|
||||
r.path = "" // Mark the archive as closed
|
||||
if r.removeOnClose {
|
||||
return os.Remove(path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
// both of which should be (nil, -1).
// On success, it returns the manifest item and an index of the matching tag, if a tag was used
// for matching; the index is -1 if a tag was not used.
func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
	switch {
	case ref != nil && sourceIndex != -1:
		return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
			ref.String(), sourceIndex)

	case ref != nil:
		refString := ref.String()
		for i := range r.Manifest {
			for tagIndex, tag := range r.Manifest[i].RepoTags {
				parsedTag, err := reference.ParseNormalizedNamed(tag)
				if err != nil {
					return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
				}
				if parsedTag.String() == refString {
					return &r.Manifest[i], tagIndex, nil
				}
			}
		}
		return nil, -1, errors.Errorf("Tag %#v not found", refString)

	case sourceIndex != -1:
		if sourceIndex >= len(r.Manifest) {
			return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
				sourceIndex, len(r.Manifest))
		}
		return &r.Manifest[sourceIndex], -1, nil

	default:
		if len(r.Manifest) != 1 {
			return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
		}
		return &r.Manifest[0], -1, nil
	}
}

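Not part of the commit: a minimal sketch of how the three matching modes above are meant to be driven. The Reader value and the tag string are assumed for illustration; this would live in the same package, next to ChooseManifestItem, and assumes the usual fmt and reference imports.

func chooseExample(r *Reader) error {
	named, err := reference.ParseNormalizedNamed("docker.io/library/busybox:latest") // assumed tag
	if err != nil {
		return err
	}
	// Match by tag: the second result is the index into the matching item's RepoTags.
	if item, tagIndex, err := r.ChooseManifestItem(named.(reference.NamedTagged), -1); err == nil {
		fmt.Printf("tag match: config=%s tagIndex=%d\n", item.Config, tagIndex)
	}
	// Match by position: the tag index comes back as -1 because no tag was used.
	if item, _, err := r.ChooseManifestItem(nil, 0); err == nil {
		fmt.Printf("first image: config=%s\n", item.Config)
	}
	// (nil, -1) is only valid when the archive holds exactly one image.
	_, _, err = r.ChooseManifestItem(nil, -1)
	return err
}
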
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
	*tar.Reader
	backingFile *os.File
}

func (t *tarReadCloser) Close() error {
	return t.backingFile.Close()
}

// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// It is safe to call this method from multiple goroutines simultaneously.
// The caller should call .Close() on the returned stream.
func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
	// This is only a sanity check; if anyone did concurrently close the Reader, this access is technically
	// racy against the write in .Close().
	if r.path == "" {
		return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
	}

	f, err := os.Open(r.path)
	if err != nil {
		return nil, err
	}
	succeeded := false
	defer func() {
		if !succeeded {
			f.Close()
		}
	}()

	tarReader, header, err := findTarComponent(f, componentPath)
	if err != nil {
		return nil, err
	}
	if header == nil {
		return nil, os.ErrNotExist
	}
	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
		// We follow only one symlink; so no loops are possible.
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			return nil, err
		}
		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
		// so we don't care.
		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, os.ErrNotExist
		}
	}

	if !header.FileInfo().Mode().IsRegular() {
		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
	}
	succeeded = true
	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}

// findTarComponent returns a header and a reader matching componentPath within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
	t := tar.NewReader(inputFile)
	componentPath = path.Clean(componentPath)
	for {
		h, err := t.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, nil, err
		}
		if path.Clean(h.Name) == componentPath {
			return t, h, nil
		}
	}
	return nil, nil, nil
}

// readTarComponent returns full contents of componentPath.
// It is safe to call this method from multiple goroutines simultaneously.
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
	file, err := r.openTarComponent(path)
	if err != nil {
		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
	}
	defer file.Close()
	bytes, err := iolimits.ReadAtMost(file, limit)
	if err != nil {
		return nil, err
	}
	return bytes, nil
}
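findTarComponent above is the standard archive/tar sequential scan. For reference, the same pattern as a self-contained sketch outside this package; the file name is a placeholder.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

// scanTar lists every entry in a tar file, walking headers the way findTarComponent does.
func scanTar(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	t := tar.NewReader(f)
	for {
		h, err := t.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", h.Name, h.Size)
	}
}

func main() {
	if err := scanTar("image.tar"); err != nil { // "image.tar" is a placeholder
		fmt.Fprintln(os.Stderr, err)
	}
}
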
@ -11,8 +11,8 @@ import (
	"path"
	"sync"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
	"github.com/containers/image/v5/internal/tmpdir"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
@ -22,8 +22,11 @@ import (

// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
	tarPath              string
	removeTarPathOnClose bool // Remove temp file on close if true
	archive      *Reader
	closeArchive bool // .Close() the archive when the source is closed.
	// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
	ref         reference.NamedTagged // May be nil
	sourceIndex int                   // May be -1
	// The following data is only available after ensureCachedDataIsPresent() succeeds
	tarManifest *ManifestItem // nil if not available yet.
	configBytes []byte
@ -41,180 +44,16 @@ type layerInfo struct {
	size int64
}

// TODO: We could add support for multiple images in a single archive, so
// that people could use docker-archive:opensuse.tar:opensuse:leap as
// the source of an image.
// This applies to both the NewSourceFromFile and NewSourceFromStream functions.

// NewSourceFromFile returns a tarfile.Source for the specified path.
// Deprecated: Please use NewSourceFromFileWithContext, which allows you to configure the temp directory
// for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromFile(path string) (*Source, error) {
	return NewSourceFromFileWithContext(nil, path)
}

// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, errors.Wrapf(err, "error opening file %q", path)
	}
	defer file.Close()

	// If the file is not compressed, we can just return the file itself
	// as a source. Otherwise we pass the stream to NewSourceFromStream.
	stream, isCompressed, err := compression.AutoDecompress(file)
	if err != nil {
		return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
	}
	defer stream.Close()
	if !isCompressed {
		return &Source{
			tarPath: path,
		}, nil
	}
	return NewSourceFromStreamWithSystemContext(sys, stream)
}

// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromStream returns.
// Deprecated: Please use NewSourceFromStreamWithSystemContext, which allows you to configure
// the temp directory for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
	return NewSourceFromStreamWithSystemContext(nil, inputStream)
}

// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromStreamWithSystemContext returns.
func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
	// FIXME: use SystemContext here.
	// Save inputStream to a temporary file
	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
	if err != nil {
		return nil, errors.Wrap(err, "error creating temporary file")
	}
	defer tarCopyFile.Close()

	succeeded := false
	defer func() {
		if !succeeded {
			os.Remove(tarCopyFile.Name())
		}
	}()

	// In order to be compatible with docker-load, we need to support
	// auto-decompression (it's also a nice quality-of-life thing to avoid
	// giving users really confusing "invalid tar header" errors).
	uncompressedStream, _, err := compression.AutoDecompress(inputStream)
	if err != nil {
		return nil, errors.Wrap(err, "Error auto-decompressing input")
	}
	defer uncompressedStream.Close()

	// Copy the plain archive to the temporary file.
	//
	// TODO: This can take quite some time, and should ideally be cancellable
	// using a context.Context.
	if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
		return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
	}
	succeeded = true

// NewSource returns a tarfile.Source for an image in the specified archive matching ref
// and sourceIndex (or the only image if they are (nil, -1)).
// The archive will be closed if closeArchive is true.
func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
	return &Source{
	tarPath:              tarCopyFile.Name(),
	removeTarPathOnClose: true,
	}, nil
}

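A hedged sketch (not in this commit) of how the new pieces compose: a Reader owns the backing file, and NewSource hands out per-image views of it. newReader is package-private, so a helper like this would live in the same package; it uses only the constructors visible above.

// openOnlyImage opens a docker-archive at path and returns a Source for its
// single image. closeArchive is true, so closing the Source closes the Reader.
func openOnlyImage(path string) (*Source, error) {
	r, err := newReader(path, false) // false: don't delete the file on Close
	if err != nil {
		return nil, err
	}
	return NewSource(r, true, nil, -1), nil
}
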
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
	*tar.Reader
	backingFile *os.File
}

func (t *tarReadCloser) Close() error {
	return t.backingFile.Close()
}

// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// The caller should call .Close() on the returned stream.
func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
	f, err := os.Open(s.tarPath)
	if err != nil {
		return nil, err
		archive:      archive,
		closeArchive: closeArchive,
		ref:          ref,
		sourceIndex:  sourceIndex,
	}
	succeeded := false
	defer func() {
		if !succeeded {
			f.Close()
		}
	}()

	tarReader, header, err := findTarComponent(f, componentPath)
	if err != nil {
		return nil, err
	}
	if header == nil {
		return nil, os.ErrNotExist
	}
	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
		// We follow only one symlink; so no loops are possible.
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			return nil, err
		}
		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
		// so we don't care.
		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, os.ErrNotExist
		}
	}

	if !header.FileInfo().Mode().IsRegular() {
		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
	}
	succeeded = true
	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}

// findTarComponent returns a header and a reader matching path within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
	t := tar.NewReader(inputFile)
	for {
		h, err := t.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, nil, err
		}
		if h.Name == path {
			return t, h, nil
		}
	}
	return nil, nil, nil
}

// readTarComponent returns full contents of componentPath.
func (s *Source) readTarComponent(path string, limit int) ([]byte, error) {
	file, err := s.openTarComponent(path)
	if err != nil {
		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
	}
	defer file.Close()
	bytes, err := iolimits.ReadAtMost(file, limit)
	if err != nil {
		return nil, err
	}
	return bytes, nil
}

// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
@ -229,37 +68,31 @@ func (s *Source) ensureCachedDataIsPresent() error {
// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
// Call ensureCachedDataIsPresent instead.
func (s *Source) ensureCachedDataIsPresentPrivate() error {
	// Read and parse manifest.json
	tarManifest, err := s.loadTarManifest()
	tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex)
	if err != nil {
		return err
	}

	// Check to make sure length is 1
	if len(tarManifest) != 1 {
		return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
	}

	// Read and parse config.
	configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize)
	configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
	if err != nil {
		return err
	}
	var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
	}
	if parsedConfig.RootFS == nil {
		return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest[0].Config)
		return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
	}

	knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
	knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
	if err != nil {
		return err
	}

	// Success; commit.
	s.tarManifest = &tarManifest[0]
	s.tarManifest = tarManifest
	s.configBytes = configBytes
	s.configDigest = digest.FromBytes(configBytes)
	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
@ -267,31 +100,17 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
	return nil
}

// loadTarManifest loads and decodes the manifest.json.
func (s *Source) loadTarManifest() ([]ManifestItem, error) {
	// FIXME? Do we need to deal with the legacy format?
	bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
	if err != nil {
		return nil, err
	}
	var items []ManifestItem
	if err := json.Unmarshal(bytes, &items); err != nil {
		return nil, errors.Wrap(err, "Error decoding tar manifest.json")
	}
	return items, nil
}

// Close removes resources associated with an initialized Source, if any.
func (s *Source) Close() error {
	if s.removeTarPathOnClose {
		return os.Remove(s.tarPath)
	if s.closeArchive {
		return s.archive.Close()
	}
	return nil
}

// LoadTarManifest loads and decodes the manifest.json
func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
	return s.loadTarManifest()
// TarManifest returns contents of manifest.json
func (s *Source) TarManifest() []ManifestItem {
	return s.archive.Manifest
}

func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
@ -308,7 +127,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
		// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
			continue
		}
		layerPath := tarManifest.Layers[i]
		layerPath := path.Clean(tarManifest.Layers[i])
		if _, ok := unknownLayerSizes[layerPath]; ok {
			return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
		}
@ -321,7 +140,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
	}

	// Scan the tar file to collect layer sizes.
	file, err := os.Open(s.tarPath)
	file, err := os.Open(s.archive.path)
	if err != nil {
		return nil, err
	}
@ -335,7 +154,9 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
		if err != nil {
			return nil, err
		}
		if li, ok := unknownLayerSizes[h.Name]; ok {
		layerPath := path.Clean(h.Name)
		// FIXME: Cache this data across images in Reader.
		if li, ok := unknownLayerSizes[layerPath]; ok {
			// Since GetBlob will decompress layers that are compressed we need
			// to do the decompression here as well, otherwise we will
			// incorrectly report the size. Pretty critical, since tools like
@ -343,7 +164,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
			// the slower method of checking if it's compressed.
			uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
			if err != nil {
				return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name)
				return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", layerPath)
			}
			defer uncompressedStream.Close()

@ -351,11 +172,11 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
			if isCompressed {
				uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
				if err != nil {
					return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name)
					return nil, errors.Wrapf(err, "Error reading %s to find its size", layerPath)
				}
			}
			li.size = uncompressedSize
			delete(unknownLayerSizes, h.Name)
			delete(unknownLayerSizes, layerPath)
		}
	}
	if len(unknownLayerSizes) != 0 {
@ -446,7 +267,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
	}

	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
		underlyingStream, err := s.openTarComponent(li.path)
		underlyingStream, err := s.archive.openTarComponent(li.path)
		if err != nil {
			return nil, 0, err
		}
@ -17,7 +17,7 @@ const (
)

// ManifestItem is an element of the array stored in the top-level manifest.json file.
type ManifestItem struct {
type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API.
	Config       string
	RepoTags     []string
	Layers       []string
381
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
generated
vendored
Normal file
@ -0,0 +1,381 @@
package tarfile

import (
	"archive/tar"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
type Writer struct {
	mutex sync.Mutex
	// ALL of the following members can only be accessed with the mutex held.
	// Use Writer.lock() to obtain the mutex.
	writer io.Writer
	tar    *tar.Writer // nil if the Writer has already been closed.
	// Other state.
	blobs            map[digest.Digest]types.BlobInfo // list of already-sent blobs
	repositories     map[string]map[string]string
	legacyLayers     map[string]struct{} // A set of IDs of legacy layers that have been already sent.
	manifest         []ManifestItem
	manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}

// NewWriter returns a Writer for the specified io.Writer.
// The caller must eventually call .Close() on the returned object to create a valid archive.
func NewWriter(dest io.Writer) *Writer {
	return &Writer{
		writer:           dest,
		tar:              tar.NewWriter(dest),
		blobs:            make(map[digest.Digest]types.BlobInfo),
		repositories:     map[string]map[string]string{},
		legacyLayers:     map[string]struct{}{},
		manifestByConfig: map[digest.Digest]int{},
	}
}

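A minimal usage sketch (assumed, not part of the commit): the intended lifecycle is NewWriter, populate the archive through the locked helpers below, then Close, which emits manifest.json and the legacy repositories file. The destination file name is a placeholder.

func writeEmptyArchive() error {
	f, err := os.Create("out.tar") // placeholder destination
	if err != nil {
		return err
	}
	defer f.Close()
	w := NewWriter(f)
	// ... images would be added here via the locked helpers below ...
	return w.Close()
}
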
// lock does some sanity checks and locks the Writer.
// If this function succeeds, the caller must call w.unlock.
// Do not use Writer.mutex directly.
func (w *Writer) lock() error {
	w.mutex.Lock()
	if w.tar == nil {
		w.mutex.Unlock()
		return errors.New("Internal error: trying to use an already closed tarfile.Writer")
	}
	return nil
}

// unlock releases the lock obtained by Writer.lock.
// Do not use Writer.mutex directly.
func (w *Writer) unlock() {
	w.mutex.Unlock()
}

// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
	if info.Digest == "" {
		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
	}
	if blob, ok := w.blobs[info.Digest]; ok {
		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
	}
	return false, types.BlobInfo{}, nil
}

// recordBlobLocked records metadata of a blob, which must contain at least a digest and size.
// The caller must have locked the Writer.
func (w *Writer) recordBlobLocked(info types.BlobInfo) {
	w.blobs[info.Digest] = info
}

// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer.
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
	if _, ok := w.legacyLayers[layerID]; !ok {
		// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
		// See also the comment in physicalLayerPath.
		physicalLayerPath := w.physicalLayerPath(layerDigest)
		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
			return errors.Wrap(err, "Error creating layer symbolic link")
		}

		b := []byte("1.0")
		if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
			return errors.Wrap(err, "Error writing VERSION file")
		}

		if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
			return errors.Wrap(err, "Error writing config json file")
		}

		w.legacyLayers[layerID] = struct{}{}
	}
	return nil
}

// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error {
	var chainID digest.Digest
	lastLayerID := ""
	for i, l := range layerDescriptors {
		// The legacy format requires a config file per layer
		layerConfig := make(map[string]interface{})

		// The root layer doesn't have any parent
		if lastLayerID != "" {
			layerConfig["parent"] = lastLayerID
		}
		// The top layer configuration file is generated by using a subset of the image configuration
		if i == len(layerDescriptors)-1 {
			var config map[string]*json.RawMessage
			err := json.Unmarshal(configBytes, &config)
			if err != nil {
				return errors.Wrap(err, "Error unmarshaling config")
			}
			for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
				layerConfig[attr] = config[attr]
			}
		}

		// This chainID value matches the computation in docker/docker/layer.CreateChainID …
		if chainID == "" {
			chainID = l.Digest
		} else {
			chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
		}
		// … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because
		// we create the image configs differently in details. At least recent versions allocate new IDs on load,
		// so this is fine as long as the IDs we use are unique / cannot loop.
		//
		// For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created
		// config makes sure that everything uses the same “namespace”; a bit less efficient but clearer.
		//
		// Temporarily add the chainID to the config, only for the purpose of generating the image ID.
		layerConfig["layer_id"] = chainID
		b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
		if err != nil {
			return errors.Wrap(err, "Error marshaling layer config")
		}
		delete(layerConfig, "layer_id")
		layerID := digest.Canonical.FromBytes(b).Hex()
		layerConfig["id"] = layerID

		configBytes, err := json.Marshal(layerConfig)
		if err != nil {
			return errors.Wrap(err, "Error marshaling layer config")
		}

		if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
			return err
		}

		lastLayerID = layerID
	}

	if lastLayerID != "" {
		for _, repoTag := range repoTags {
			if val, ok := w.repositories[repoTag.Name()]; ok {
				val[repoTag.Tag()] = lastLayerID
			} else {
				w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID}
			}
		}
	}
	return nil
}

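The chain-ID recurrence above is plain digest-of-concatenation; here is a standalone illustration with two made-up layer digests (note that writeLegacyMetadataLocked then derives the final layer ID from the marshaled per-layer config, not from the chain ID directly).

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	layers := []digest.Digest{
		"sha256:1111111111111111111111111111111111111111111111111111111111111111", // assumed
		"sha256:2222222222222222222222222222222222222222222222222222222222222222", // assumed
	}
	var chainID digest.Digest
	for _, l := range layers {
		if chainID == "" {
			chainID = l // the bottom layer is its own chain ID
		} else {
			// Matches docker/docker/layer.CreateChainID: digest of "<chain> <layer>".
			chainID = digest.Canonical.FromString(chainID.String() + " " + l.String())
		}
	}
	fmt.Println(chainID)
}
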
// checkManifestItemsMatch checks that a and b describe the same image,
// and returns an error if that’s not the case (which should never happen).
func checkManifestItemsMatch(a, b *ManifestItem) error {
	if a.Config != b.Config {
		return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
	}
	if len(a.Layers) != len(b.Layers) {
		return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
	}
	for i := range a.Layers {
		if a.Layers[i] != b.Layers[i] {
			return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
		}
	}
	// Ignore RepoTags, which will be built later.
	// Ignore Parent and LayerSources, which we don’t set to anything meaningful.
	return nil
}

// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags.
// The caller must have locked the Writer.
func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
	layerPaths := []string{}
	for _, l := range layerDescriptors {
		layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
	}

	var item *ManifestItem
	newItem := ManifestItem{
		Config:       w.configPath(configDigest),
		RepoTags:     []string{},
		Layers:       layerPaths,
		Parent:       "", // We don’t have this information
		LayerSources: nil,
	}
	if i, ok := w.manifestByConfig[configDigest]; ok {
		item = &w.manifest[i]
		if err := checkManifestItemsMatch(item, &newItem); err != nil {
			return err
		}
	} else {
		i := len(w.manifest)
		w.manifestByConfig[configDigest] = i
		w.manifest = append(w.manifest, newItem)
		item = &w.manifest[i]
	}

	knownRepoTags := map[string]struct{}{}
	for _, repoTag := range item.RepoTags {
		knownRepoTags[repoTag] = struct{}{}
	}
	for _, tag := range repoTags {
		// For github.com/docker/docker consumers, this works just as well as
		//   refString := ref.String()
		// because when reading the RepoTags strings, github.com/docker/docker/reference
		// normalizes both of them to the same value.
		//
		// Doing it this way to include the normalized-out `docker.io[/library]` does make
		// a difference for github.com/projectatomic/docker consumers, with the
		// “Add --add-registry and --block-registry options to docker daemon” patch.
		// These consumers treat reference strings which include a hostname and reference
		// strings without a hostname differently.
		//
		// Using the host name here is more explicit about the intent, and it has the same
		// effect as (docker pull) in projectatomic/docker, which tags the result using
		// a hostname-qualified reference.
		// See https://github.com/containers/image/issues/72 for a more detailed
		// analysis and explanation.
		refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())

		if _, ok := knownRepoTags[refString]; !ok {
			item.RepoTags = append(item.RepoTags, refString)
			knownRepoTags[refString] = struct{}{}
		}
	}

	return nil
}

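Worked example of the refString choice, with assumed values: parsing the short reference busybox:latest yields Name() == "docker.io/library/busybox" and Tag() == "latest", so the RepoTags entry recorded in manifest.json is the hostname-qualified docker.io/library/busybox:latest rather than the unqualified busybox:latest.
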
// Close writes all outstanding data about images to the archive, and finishes writing data
// to the underlying io.Writer.
// No more images can be added after this is called.
func (w *Writer) Close() error {
	if err := w.lock(); err != nil {
		return err
	}
	defer w.unlock()

	b, err := json.Marshal(&w.manifest)
	if err != nil {
		return err
	}
	if err := w.sendBytesLocked(manifestFileName, b); err != nil {
		return err
	}

	b, err = json.Marshal(w.repositories)
	if err != nil {
		return errors.Wrap(err, "Error marshaling repositories")
	}
	if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
		return errors.Wrap(err, "Error writing config json file")
	}

	if err := w.tar.Close(); err != nil {
		return err
	}
	w.tar = nil // Mark the Writer as closed.
	return nil
}

// configPath returns a path we choose for storing a config with the specified digest.
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) string {
	return configDigest.Hex() + ".json"
}

// physicalLayerPath returns a path we choose for storing a layer with the specified digest
// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
	// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
	// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
	// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
	// in the root of the tarball.
	return layerDigest.Hex() + ".tar"
}

type tarFI struct {
	path      string
	size      int64
	isSymlink bool
}

func (t *tarFI) Name() string {
	return t.path
}
func (t *tarFI) Size() int64 {
	return t.size
}
func (t *tarFI) Mode() os.FileMode {
	if t.isSymlink {
		return os.ModeSymlink
	}
	return 0444
}
func (t *tarFI) ModTime() time.Time {
	return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
	return false
}
func (t *tarFI) Sys() interface{} {
	return nil
}

// sendSymlinkLocked sends a symlink into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendSymlinkLocked(path string, target string) error {
	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
	if err != nil {
		return err
	}
	logrus.Debugf("Sending as tar link %s -> %s", path, target)
	return w.tar.WriteHeader(hdr)
}

// sendBytesLocked sends a path into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendBytesLocked(path string, b []byte) error {
	return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
}

// sendFileLocked sends a file into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
	if err != nil {
		return err
	}
	logrus.Debugf("Sending as tar file %s", path)
	if err := w.tar.WriteHeader(hdr); err != nil {
		return err
	}
	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
	size, err := io.Copy(w.tar, stream)
	if err != nil {
		return err
	}
	if size != expectedSize {
		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
	}
	return nil
}
21
vendor/github.com/containers/image/v5/docker/lookaside.go
generated
vendored
@ -11,6 +11,7 @@ import (

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage/pkg/homedir"
	"github.com/ghodss/yaml"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
@ -26,6 +27,9 @@ var systemRegistriesDirPath = builtinRegistriesDirPath
// DO NOT change this, instead see systemRegistriesDirPath above.
const builtinRegistriesDirPath = "/etc/containers/registries.d"

// userRegistriesDir is the path to the per user registries.d.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")

// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
@ -75,14 +79,17 @@ func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReferenc

// registriesDirPath returns a path to registries.d
func registriesDirPath(sys *types.SystemContext) string {
	if sys != nil {
		if sys.RegistriesDirPath != "" {
			return sys.RegistriesDirPath
		}
		if sys.RootForImplicitAbsolutePaths != "" {
			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
		}
	if sys != nil && sys.RegistriesDirPath != "" {
		return sys.RegistriesDirPath
	}
	userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
	if _, err := os.Stat(userRegistriesDirPath); err == nil {
		return userRegistriesDirPath
	}
	if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
		return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
	}

	return systemRegistriesDirPath
}

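Stated explicitly, the precedence this hunk establishes: an explicit sys.RegistriesDirPath still wins; otherwise the per-user $HOME/.config/containers/registries.d is returned if it exists; then sys.RootForImplicitAbsolutePaths prefixed onto the system registries.d path; and finally the built-in /etc/containers/registries.d.
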
424
vendor/github.com/containers/image/v5/docker/tarfile/dest.go
generated
vendored
@ -1,424 +0,0 @@
package tarfile

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
	"github.com/containers/image/v5/internal/tmpdir"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
	writer   io.Writer
	tar      *tar.Writer
	repoTags []reference.NamedTagged
	// Other state.
	blobs  map[digest.Digest]types.BlobInfo // list of already-sent blobs
	config []byte
	sysCtx *types.SystemContext
}

// NewDestination returns a tarfile.Destination for the specified io.Writer.
// Deprecated: please use NewDestinationWithContext instead
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
	return NewDestinationWithContext(nil, dest, ref)
}

// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
	repoTags := []reference.NamedTagged{}
	if ref != nil {
		repoTags = append(repoTags, ref)
	}
	return &Destination{
		writer:   dest,
		tar:      tar.NewWriter(dest),
		repoTags: repoTags,
		blobs:    make(map[digest.Digest]types.BlobInfo),
		sysCtx:   sys,
	}
}

// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
	d.repoTags = append(d.repoTags, tags...)
}

// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
// If an empty slice or nil is returned, then any MIME type can be tried for upload.
func (d *Destination) SupportedManifestMIMETypes() []string {
	return []string{
		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
	}
}

// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
	return errors.Errorf("Storing signatures for docker tar files is not supported")
}

// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
	return false
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
	return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
	return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	// Ouch, we need to stream the blob into a temporary file just to determine the size.
	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
		if err != nil {
			return types.BlobInfo{}, err
		}
		defer os.Remove(streamCopy.Name())
		defer streamCopy.Close()

		digester := digest.Canonical.Digester()
		tee := io.TeeReader(stream, digester.Hash())
		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
		size, err := io.Copy(streamCopy, tee)
		if err != nil {
			return types.BlobInfo{}, err
		}
		_, err = streamCopy.Seek(0, io.SeekStart)
		if err != nil {
			return types.BlobInfo{}, err
		}
		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
		if inputInfo.Digest == "" {
			inputInfo.Digest = digester.Digest()
		}
		stream = streamCopy
		logrus.Debugf("... streaming done")
	}

	// Maybe the blob has been already sent
	ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if ok {
		return reusedInfo, nil
	}

	if isConfig {
		buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
		if err != nil {
			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
		}
		d.config = buf
		if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
			return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
		}
	} else {
		// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
		// writeLegacyLayerMetadata constructs layer IDs differently from inputinfo.Digest values (as described
		// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
		// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
		// in the root of the tarball.
		if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
			return types.BlobInfo{}, err
		}
	}
	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}

// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
	if info.Digest == "" {
		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
	}
	if blob, ok := d.blobs[info.Digest]; ok {
		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
	}
	return false, types.BlobInfo{}, nil
}

func (d *Destination) createRepositoriesFile(rootLayerID string) error {
	repositories := map[string]map[string]string{}
	for _, repoTag := range d.repoTags {
		if val, ok := repositories[repoTag.Name()]; ok {
			val[repoTag.Tag()] = rootLayerID
		} else {
			repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID}
		}
	}

	b, err := json.Marshal(repositories)
	if err != nil {
		return errors.Wrap(err, "Error marshaling repositories")
	}
	if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil {
		return errors.Wrap(err, "Error writing config json file")
	}
	return nil
}

// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
	if instanceDigest != nil {
		return errors.New(`Manifest lists are not supported for docker tar files`)
	}
	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
	// so the caller trying a different manifest kind would be pointless.
	var man manifest.Schema2
	if err := json.Unmarshal(m, &man); err != nil {
		return errors.Wrap(err, "Error parsing manifest")
	}
	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
	}

	layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors)
	if err != nil {
		return err
	}

	if len(man.LayersDescriptors) > 0 {
		if err := d.createRepositoriesFile(lastLayerID); err != nil {
			return err
		}
	}

	repoTags := []string{}
	for _, tag := range d.repoTags {
		// For github.com/docker/docker consumers, this works just as well as
		//   refString := ref.String()
		// because when reading the RepoTags strings, github.com/docker/docker/reference
		// normalizes both of them to the same value.
		//
		// Doing it this way to include the normalized-out `docker.io[/library]` does make
		// a difference for github.com/projectatomic/docker consumers, with the
		// “Add --add-registry and --block-registry options to docker daemon” patch.
		// These consumers treat reference strings which include a hostname and reference
		// strings without a hostname differently.
		//
		// Using the host name here is more explicit about the intent, and it has the same
		// effect as (docker pull) in projectatomic/docker, which tags the result using
		// a hostname-qualified reference.
		// See https://github.com/containers/image/issues/72 for a more detailed
		// analysis and explanation.
		refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
		repoTags = append(repoTags, refString)
	}

	items := []ManifestItem{{
		Config:       man.ConfigDescriptor.Digest.Hex() + ".json",
		RepoTags:     repoTags,
		Layers:       layerPaths,
		Parent:       "",
		LayerSources: nil,
	}}
	itemsBytes, err := json.Marshal(&items)
	if err != nil {
		return err
	}

	// FIXME? Do we also need to support the legacy format?
	return d.sendBytes(manifestFileName, itemsBytes)
}

// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers
func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) {
	var chainID digest.Digest
	lastLayerID = ""
	for i, l := range layerDescriptors {
		// This chainID value matches the computation in docker/docker/layer.CreateChainID …
		if chainID == "" {
			chainID = l.Digest
		} else {
			chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
		}
		// … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent
		// versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop.
		//
		// Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID
		// (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more
		// times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation
		// which also mixes in the full image configuration seems unnecessary, at least as long as we are storing
		// only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with
		// configuration).
		layerID := chainID.Hex()

		physicalLayerPath := l.Digest.Hex() + ".tar"
		// The layer itself has been stored into physicalLayerPath in PutManifest.
		// So, use that path for layerPaths used in the non-legacy manifest
		layerPaths = append(layerPaths, physicalLayerPath)
		// ... and create a symlink for the legacy format;
		if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
			return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
		}

		b := []byte("1.0")
		if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
			return nil, "", errors.Wrap(err, "Error writing VERSION file")
		}

		// The legacy format requires a config file per layer
		layerConfig := make(map[string]interface{})
		layerConfig["id"] = layerID

		// The root layer doesn't have any parent
		if lastLayerID != "" {
			layerConfig["parent"] = lastLayerID
		}
		// The root layer configuration file is generated by using a subset of the image configuration
		if i == len(layerDescriptors)-1 {
			var config map[string]*json.RawMessage
			err := json.Unmarshal(d.config, &config)
			if err != nil {
				return nil, "", errors.Wrap(err, "Error unmarshaling config")
			}
			for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
				layerConfig[attr] = config[attr]
			}
		}
		b, err := json.Marshal(layerConfig)
		if err != nil {
			return nil, "", errors.Wrap(err, "Error marshaling layer config")
		}
		if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
			return nil, "", errors.Wrap(err, "Error writing config json file")
		}

		lastLayerID = layerID
	}
	return layerPaths, lastLayerID, nil
}

type tarFI struct {
|
||||
path string
|
||||
size int64
|
||||
isSymlink bool
|
||||
}
|
||||
|
||||
func (t *tarFI) Name() string {
|
||||
return t.path
|
||||
}
|
||||
func (t *tarFI) Size() int64 {
|
||||
return t.size
|
||||
}
|
||||
func (t *tarFI) Mode() os.FileMode {
|
||||
if t.isSymlink {
|
||||
return os.ModeSymlink
|
||||
}
|
||||
return 0444
|
||||
}
|
||||
func (t *tarFI) ModTime() time.Time {
|
||||
return time.Unix(0, 0)
|
||||
}
|
||||
func (t *tarFI) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (t *tarFI) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendSymlink sends a symlink into the tar stream.
|
||||
func (d *Destination) sendSymlink(path string, target string) error {
|
||||
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
logrus.Debugf("Sending as tar link %s -> %s", path, target)
|
||||
return d.tar.WriteHeader(hdr)
|
||||
}
|
||||
|
||||
// sendBytes sends a path into the tar stream.
|
||||
func (d *Destination) sendBytes(path string, b []byte) error {
|
||||
return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
|
||||
}
|
||||
|
||||
// sendFile sends a file into the tar stream.
|
||||
func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
|
||||
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
logrus.Debugf("Sending as tar file %s", path)
|
||||
if err := d.tar.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
|
||||
size, err := io.Copy(d.tar, stream)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size != expectedSize {
|
||||
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
|
||||
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
|
||||
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
|
||||
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
|
||||
if instanceDigest != nil {
|
||||
return errors.Errorf(`Manifest lists are not supported for docker tar files`)
|
||||
}
|
||||
if len(signatures) != 0 {
|
||||
return errors.Errorf("Storing signatures for docker tar files is not supported")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit finishes writing data to the underlying io.Writer.
|
||||
// It is the caller's responsibility to close it, if necessary.
|
||||
func (d *Destination) Commit(ctx context.Context) error {
|
||||
return d.tar.Close()
|
||||
}
|
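For orientation, the tarFI type above exists only so that tar.FileInfoHeader can synthesize headers for entries that never lived on disk. A minimal standalone sketch of the same technique (not part of the diff; fakeFI and writeBytesAsTar are hypothetical names):

```go
// Sketch: emit in-memory bytes as a tar entry by handing a synthetic
// os.FileInfo to tar.FileInfoHeader, mirroring tarFI above.
package main

import (
	"archive/tar"
	"bytes"
	"io"
	"os"
	"time"
)

type fakeFI struct {
	name string
	size int64
}

func (f fakeFI) Name() string       { return f.name }
func (f fakeFI) Size() int64        { return f.size }
func (f fakeFI) Mode() os.FileMode  { return 0444 }
func (f fakeFI) ModTime() time.Time { return time.Unix(0, 0) }
func (f fakeFI) IsDir() bool        { return false }
func (f fakeFI) Sys() interface{}   { return nil }

func writeBytesAsTar(tw *tar.Writer, path string, b []byte) error {
	hdr, err := tar.FileInfoHeader(fakeFI{name: path, size: int64(len(b))}, "")
	if err != nil {
		return err
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	_, err = io.Copy(tw, bytes.NewReader(b))
	return err
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := writeBytesAsTar(tw, "VERSION", []byte("1.0")); err != nil {
		panic(err)
	}
	tw.Close()
}
```
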
3 vendor/github.com/containers/image/v5/docker/tarfile/doc.go generated vendored
@@ -1,3 +0,0 @@
// Package tarfile is an internal implementation detail of some transports.
// Do not use outside of the github.com/containers/image repo!
package tarfile
7 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go generated vendored
@@ -183,7 +183,12 @@ func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (temp
	src := ref.resolvedFile
	dst := tempDirRef.tempDirectory
	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
	if err := archive.UntarPath(src, dst); err != nil {
	arch, err := os.Open(src)
	if err != nil {
		return tempDirOCIRef{}, err
	}
	defer arch.Close()
	if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
		if err := tempDirRef.deleteTempDir(); err != nil {
			return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
		}

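The hunk above replaces archive.UntarPath with an explicit Archiver so that the NoLchown option can be passed, which avoids lchown failures when extracting as an unprivileged user. A rough sketch of the same call shape, assuming the github.com/docker/docker/pkg/archive package vendored here (paths are made up):

```go
// Sketch only: untar an archive into a destination directory without
// chowning symlinks, mirroring the call in the hunk above.
package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	arch, err := os.Open("/tmp/example-oci.tar") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer arch.Close()

	// NoLchown: skip lchown on extracted entries, so extraction works
	// without CAP_CHOWN.
	opts := &archive.TarOptions{NoLchown: true}
	if err := archive.NewDefaultArchiver().Untar(arch, "/tmp/example-dst", opts); err != nil {
		panic(err)
	}
}
```
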
4 vendor/github.com/containers/image/v5/oci/layout/oci_src.go generated vendored
@@ -141,6 +141,10 @@ func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
}

func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
	if len(urls) == 0 {
		return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
	}

	errWrap := errors.New("failed fetching external blob from all urls")
	for _, url := range urls {

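getExternalBlob now guards against an empty URL list and folds each per-URL failure into one accumulating error before trying the next URL. A standalone sketch of that pattern with pkg/errors (the fetch helper is a hypothetical stand-in for the real HTTP request):

```go
// Sketch of the try-each-URL pattern: keep one accumulating error and
// wrap every per-URL failure into it; return nil on the first success.
package main

import (
	"fmt"
	"net/http"

	"github.com/pkg/errors"
)

func fetch(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return errors.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func fetchAny(urls []string) error {
	errWrap := errors.New("failed fetching external blob from all urls")
	for _, url := range urls {
		if err := fetch(url); err != nil {
			errWrap = errors.Wrapf(errWrap, "%s: %v", url, err)
			continue
		}
		return nil // success
	}
	return errWrap
}

func main() {
	fmt.Println(fetchAny([]string{"https://example.com/blob"}))
}
```
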
4 vendor/github.com/containers/image/v5/pkg/docker/config/config.go generated vendored
@@ -11,9 +11,9 @@ import (
	"strings"

	"github.com/containers/image/v5/types"
	"github.com/containers/storage/pkg/homedir"
	helperclient "github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
	"github.com/docker/docker/pkg/homedir"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
@@ -345,7 +345,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
		return errors.Wrapf(err, "error marshaling JSON %q", path)
	}

	if err = ioutil.WriteFile(path, newData, 0755); err != nil {
	if err = ioutil.WriteFile(path, newData, 0600); err != nil {
		return errors.Wrapf(err, "error writing to file %q", path)
	}
}

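The second hunk tightens the auth file's mode from 0755 to 0600, so the stored registry credentials are readable only by their owner. A tiny illustration of the write (the path here is made up):

```go
// Sketch: credential stores should be written owner-read/write only,
// matching the 0600 fix in the hunk above.
package main

import (
	"io/ioutil"
	"log"
)

func main() {
	data := []byte(`{"auths":{}}`)
	// 0600: only the owning user can read or modify the credentials file.
	if err := ioutil.WriteFile("/tmp/example-auth.json", data, 0600); err != nil {
		log.Fatal(err)
	}
}
```
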
167 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go generated vendored
@@ -338,55 +338,86 @@ func (config *V2RegistriesConf) postProcess() error {
}

// ConfigPath returns the path to the system-wide registry configuration file.
// Deprecated: This API implies configuration is read from files, and that there is only one.
// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigPath(ctx *types.SystemContext) string {
	if ctx != nil && ctx.SystemRegistriesConfPath != "" {
		return ctx.SystemRegistriesConfPath
	}

	userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
	if _, err := os.Stat(userRegistriesFilePath); err == nil {
		return userRegistriesFilePath
	}

	if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
		return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
	}

	return systemRegistriesConfPath
	return newConfigWrapper(ctx).configPath
}

// ConfigDirPath returns the path to the system-wide directory for drop-in
// ConfigDirPath returns the path to the directory for drop-in
// registry configuration files.
// Deprecated: This API implies configuration is read from directories, and that there is only one.
// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigDirPath(ctx *types.SystemContext) string {
	if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
		return ctx.SystemRegistriesConfDirPath
	configWrapper := newConfigWrapper(ctx)
	if configWrapper.userConfigDirPath != "" {
		return configWrapper.userConfigDirPath
	}

	userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
	if _, err := os.Stat(userRegistriesDirPath); err == nil {
		return userRegistriesDirPath
	}

	if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
		return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
	}

	return systemRegistriesConfDirPath
	return configWrapper.configDirPath
}

// configWrapper is used to store the paths from ConfigPath and ConfigDirPath
// and acts as a key to the internal cache.
type configWrapper struct {
	configPath string
	// path to the registries.conf file
	configPath string
	// path to system-wide registries.conf.d directory, or "" if not used
	configDirPath string
	// path to user-specified registries.conf.d directory, or "" if not used
	userConfigDirPath string
}

// newConfigWrapper returns a configWrapper for the specified SystemContext.
func newConfigWrapper(ctx *types.SystemContext) configWrapper {
	return configWrapper{
		configPath:    ConfigPath(ctx),
		configDirPath: ConfigDirPath(ctx),
	var wrapper configWrapper
	userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
	userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)

	// decide configPath using per-user path or system file
	if ctx != nil && ctx.SystemRegistriesConfPath != "" {
		wrapper.configPath = ctx.SystemRegistriesConfPath
	} else if _, err := os.Stat(userRegistriesFilePath); err == nil {
		// per-user registries.conf exists, not reading system dir
		// return config dirs from ctx or per-user one
		wrapper.configPath = userRegistriesFilePath
		if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
			wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
		} else {
			wrapper.userConfigDirPath = userRegistriesDirPath
		}
		return wrapper
	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
		wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
	} else {
		wrapper.configPath = systemRegistriesConfPath
	}

	// potentially use both system and per-user dirs if not using per-user config file
	if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
		// dir explicitly chosen: use only that one
		wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
		wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
		wrapper.userConfigDirPath = userRegistriesDirPath
	} else {
		wrapper.configDirPath = systemRegistriesConfDirPath
		wrapper.userConfigDirPath = userRegistriesDirPath
	}

	return wrapper
}

// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
func ConfigurationSourceDescription(ctx *types.SystemContext) string {
	wrapper := newConfigWrapper(ctx)
	configSources := []string{wrapper.configPath}
	if wrapper.configDirPath != "" {
		configSources = append(configSources, wrapper.configDirPath)
	}
	if wrapper.userConfigDirPath != "" {
		configSources = append(configSources, wrapper.userConfigDirPath)
	}
	return strings.Join(configSources, ", ")
}

// configMutex is used to synchronize concurrent accesses to configCache.
@@ -422,39 +453,49 @@ func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d
// directory.
func dropInConfigs(wrapper configWrapper) ([]string, error) {
	var configs []string

	err := filepath.Walk(wrapper.configDirPath,
		// WalkFunc to read additional configs
		func(path string, info os.FileInfo, err error) error {
			switch {
			case err != nil:
				// return error (could be a permission problem)
				return err
			case info == nil:
				// this should only happen when err != nil but let's be sure
				return nil
			case info.IsDir():
				if path != wrapper.configDirPath {
					// make sure to not recurse into sub-directories
					return filepath.SkipDir
				}
				// ignore directories
				return nil
			default:
				// only add *.conf files
				if strings.HasSuffix(path, ".conf") {
					configs = append(configs, path)
				}
				return nil
			}
		},
	var (
		configs  []string
		dirPaths []string
	)
	if wrapper.configDirPath != "" {
		dirPaths = append(dirPaths, wrapper.configDirPath)
	}
	if wrapper.userConfigDirPath != "" {
		dirPaths = append(dirPaths, wrapper.userConfigDirPath)
	}
	for _, dirPath := range dirPaths {
		err := filepath.Walk(dirPath,
			// WalkFunc to read additional configs
			func(path string, info os.FileInfo, err error) error {
				switch {
				case err != nil:
					// return error (could be a permission problem)
					return err
				case info == nil:
					// this should only happen when err != nil but let's be sure
					return nil
				case info.IsDir():
					if path != dirPath {
						// make sure to not recurse into sub-directories
						return filepath.SkipDir
					}
					// ignore directories
					return nil
				default:
					// only add *.conf files
					if strings.HasSuffix(path, ".conf") {
						configs = append(configs, path)
					}
					return nil
				}
			},
		)

		if err != nil && !os.IsNotExist(err) {
			// Ignore IsNotExist errors: most systems won't have a registries.conf.d
			// directory.
			return nil, errors.Wrapf(err, "error reading registries.conf.d")
		if err != nil && !os.IsNotExist(err) {
			// Ignore IsNotExist errors: most systems won't have a registries.conf.d
			// directory.
			return nil, errors.Wrapf(err, "error reading registries.conf.d")
		}
	}

	return configs, nil

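The rewritten newConfigWrapper encodes a lookup precedence: an explicit path from the SystemContext wins, then a per-user registries.conf, then a root-prefixed system path, then the system default. A hedged sketch of that decision ladder in isolation, with simplified stand-in types and constants (not the vendored API itself):

```go
// Standalone sketch of the path precedence implemented by newConfigWrapper:
// explicit context path > per-user file > root-prefixed system path > default.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

const (
	systemRegistriesConfPath = "/etc/containers/registries.conf"
	userRegistriesFile       = ".config/containers/registries.conf"
)

type systemContext struct {
	SystemRegistriesConfPath     string
	RootForImplicitAbsolutePaths string
}

func configPath(ctx *systemContext, home string) string {
	if ctx != nil && ctx.SystemRegistriesConfPath != "" {
		return ctx.SystemRegistriesConfPath // caller override wins
	}
	userPath := filepath.Join(home, userRegistriesFile)
	if _, err := os.Stat(userPath); err == nil {
		return userPath // per-user config shadows the system file
	}
	if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
		return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
	}
	return systemRegistriesConfPath
}

func main() {
	home, _ := os.UserHomeDir()
	fmt.Println(configPath(nil, home))
}
```
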
4 vendor/github.com/containers/image/v5/version/version.go generated vendored
@@ -6,9 +6,9 @@ const (
	// VersionMajor is for API-incompatible changes
	VersionMajor = 5
	// VersionMinor is for functionality in a backwards-compatible manner
	VersionMinor = 5
	VersionMinor = 6
	// VersionPatch is for backwards-compatible bug fixes
	VersionPatch = 2
	VersionPatch = 0

	// VersionDev indicates development branch. Releases will be empty string.
	VersionDev = ""

75 vendor/github.com/imdario/mergo/README.md generated vendored
@@ -1,44 +1,54 @@
# Mergo


[![GoDoc][3]][4]
[![GitHub release][5]][6]
[![GoCard][7]][8]
[![Build Status][1]][2]
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]

[![GoCenter Kudos][15]][16]

[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg
[6]: https://github.com/imdario/mergo/releases
[7]: https://goreportcard.com/badge/imdario/mergo
[8]: https://goreportcard.com/report/github.com/imdario/mergo
[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[10]: https://coveralls.io/github/imdario/mergo?branch=master
[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
[16]: https://search.gocenter.io/github.com/imdario/mergo

A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.

Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).

Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.

## Status

It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).

[![GoDoc][3]][4]
[![GoCard][5]][6]
[![Build Status][1]][2]
[![Coverage Status][7]][8]
[![Sourcegraph][9]][10]
[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)

[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://goreportcard.com/badge/imdario/mergo
[6]: https://goreportcard.com/report/github.com/imdario/mergo
[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[8]: https://coveralls.io/github/imdario/mergo?branch=master
[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[10]: https://sourcegraph.com/github.com/imdario/mergo?badge

### Latest release

[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).

### Important note

Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.

If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.

If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).

### Donations

If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:

<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[](https://beerpay.io/imdario/mergo)
@@ -87,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [janoszen/containerssh](https://github.com/janoszen/containerssh)

## Installation
## Install

    go get github.com/imdario/mergo

@@ -99,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month

## Usage

You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).

```go
if err := mergo.Merge(&dst, src); err != nil {
@@ -125,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil {

Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.

More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).

### Nice example
Here is a nice example:

```go
package main
@@ -175,10 +184,10 @@ import (
	"time"
)

type timeTransfomer struct {
type timeTransformer struct {
}

func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ == reflect.TypeOf(time.Time{}) {
		return func(dst, src reflect.Value) error {
			if dst.CanSet() {
@@ -202,7 +211,7 @@ type Snapshot struct {
func main() {
	src := Snapshot{time.Now()}
	dest := Snapshot{}
	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
	fmt.Println(dest)
	// Will print
	// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }

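For orientation, a minimal end-to-end use of the bumped mergo (v0.3.11) doing exactly what the README describes: zero-value fields in dst are filled from src. The Config type below is illustrative, not from the library:

```go
// Minimal sketch of mergo's core use case: fill zero-value fields in a
// destination struct from a defaults struct.
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Host string
	Port int
}

func main() {
	cfg := Config{Host: "localhost"} // Port is zero, so it gets the default
	defaults := Config{Host: "0.0.0.0", Port: 8080}

	if err := mergo.Merge(&cfg, defaults); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:localhost Port:8080}
}
```
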
141 vendor/github.com/imdario/mergo/doc.go generated vendored
@@ -4,41 +4,140 @@
// license that can be found in the LICENSE file.

/*
Package mergo merges same-type structs and maps by setting default values in zero-value fields.
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.

Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).

Status

It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.

Important note

Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules.

Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.

If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).

Install

Do your usual installation procedure:

    go get github.com/imdario/mergo

    // use in your .go code
    import (
        "github.com/imdario/mergo"
    )

Usage

From my own work-in-progress project:
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).

    type networkConfig struct {
        Protocol string
        Address string
        ServerType string `json: "server_type"`
        Port uint16
    if err := mergo.Merge(&dst, src); err != nil {
        // ...
    }

    type FssnConfig struct {
        Network networkConfig
Also, you can merge overwriting values using the transformer WithOverride.

    if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
        // ...
    }

    var fssnDefault = FssnConfig {
        networkConfig {
            "tcp",
            "127.0.0.1",
            "http",
            31560,
        },
Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.

    if err := mergo.Map(&dst, srcMap); err != nil {
        // ...
    }

    // Inside a function [...]
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.

    if err := mergo.Merge(&config, fssnDefault); err != nil {
        log.Fatal(err)
Here is a nice example:

    package main

    import (
        "fmt"
        "github.com/imdario/mergo"
    )

    type Foo struct {
        A string
        B int64
    }

    // More code [...]
    func main() {
        src := Foo{
            A: "one",
            B: 2,
        }
        dest := Foo{
            A: "two",
        }
        mergo.Merge(&dest, src)
        fmt.Println(dest)
        // Will print
        // {two 2}
    }

Transformers

Transformers allow merging specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?

    package main

    import (
        "fmt"
        "github.com/imdario/mergo"
        "reflect"
        "time"
    )

    type timeTransformer struct {
    }

    func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
        if typ == reflect.TypeOf(time.Time{}) {
            return func(dst, src reflect.Value) error {
                if dst.CanSet() {
                    isZero := dst.MethodByName("IsZero")
                    result := isZero.Call([]reflect.Value{})
                    if result[0].Bool() {
                        dst.Set(src)
                    }
                }
                return nil
            }
        }
        return nil
    }

    type Snapshot struct {
        Time time.Time
        // ...
    }

    func main() {
        src := Snapshot{time.Now()}
        dest := Snapshot{}
        mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
        fmt.Println(dest)
        // Will print
        // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
    }

Contact me

If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario

About

Written by Dario Castañé: https://da.rio.hn

License

BSD 3-Clause license, as Go language.

*/
package mergo

5 vendor/github.com/imdario/mergo/go.mod generated vendored Normal file
@@ -0,0 +1,5 @@
module github.com/imdario/mergo

go 1.13

require gopkg.in/yaml.v2 v2.3.0
4 vendor/github.com/imdario/mergo/go.sum generated vendored Normal file
@@ -0,0 +1,4 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
10 vendor/github.com/imdario/mergo/map.go generated vendored
@@ -99,11 +99,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
				continue
			}
			if srcKind == dstKind {
				if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
					return
				}
			} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
				if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
					return
				}
			} else if srcKind == reflect.Map {
@@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
}

func _map(dst, src interface{}, opts ...func(*Config)) error {
	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
		return ErrNonPointerAgument
	}
	var (
		vDst, vSrc reflect.Value
		err        error
@@ -157,8 +160,7 @@ func _map(dst, src interface{}, opts ...func(*Config)) error {
	// To be friction-less, we redirect equal-type arguments
	// to deepMerge. Only because arguments can be anything.
	if vSrc.Kind() == vDst.Kind() {
		_, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
		return err
		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
	}
	switch vSrc.Kind() {
	case reflect.Struct:

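_map backs both Map and MapWithOverwrite; the new guard simply rejects a non-pointer dst up front with ErrNonPointerAgument instead of failing deeper in reflection. A short sketch of the exported entry point it serves (the Server type is illustrative):

```go
// Sketch of mergo.Map, whose internal _map gained the non-pointer guard
// shown above: source map keys are capitalized to match exported fields.
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Server struct {
	Name string
	Port int
}

func main() {
	var dst Server
	src := map[string]interface{}{"name": "edge", "port": 9090}

	if err := mergo.Map(&dst, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {Name:edge Port:9090}

	// Passing a non-pointer dst now fails fast instead of misbehaving.
	if err := mergo.Map(dst, src); err != nil {
		fmt.Println(err) // dst must be a pointer
	}
}
```
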
273 vendor/github.com/imdario/mergo/merge.go generated vendored
@@ -11,26 +11,26 @@ package mergo
import (
	"fmt"
	"reflect"
	"unsafe"
)

func hasExportedField(dst reflect.Value) (exported bool) {
func hasMergeableFields(dst reflect.Value) (exported bool) {
	for i, n := 0, dst.NumField(); i < n; i++ {
		field := dst.Type().Field(i)
		if isExportedComponent(&field) {
			return true
		if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
			exported = exported || hasMergeableFields(dst.Field(i))
		} else if isExportedComponent(&field) {
			exported = exported || len(field.PkgPath) == 0
		}
	}
	return
}

func isExportedComponent(field *reflect.StructField) bool {
	name := field.Name
	pkgPath := field.PkgPath
	if len(pkgPath) > 0 {
		return false
	}
	c := name[0]
	c := field.Name[0]
	if 'a' <= c && c <= 'z' || c == '_' {
		return false
	}
@@ -44,6 +44,8 @@ type Config struct {
	Transformers                 Transformers
	overwriteWithEmptyValue      bool
	overwriteSliceWithEmptyValue bool
	sliceDeepCopy                bool
	debug                        bool
}

type Transformers interface {
@@ -53,17 +55,16 @@ type Transformers interface {
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) {
	dst = dstIn
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
	overwrite := config.Overwrite
	typeCheck := config.TypeCheck
	overwriteWithEmptySrc := config.overwriteWithEmptyValue
	overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
	sliceDeepCopy := config.sliceDeepCopy

	if !src.IsValid() {
		return
	}

	if dst.CanAddr() {
		addr := dst.UnsafeAddr()
		h := 17 * addr
@@ -71,7 +72,7 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
		typ := dst.Type()
		for p := seen; p != nil; p = p.next {
			if p.ptr == addr && p.typ == typ {
				return dst, nil
				return nil
			}
		}
		// Remember, remember...
@@ -85,125 +86,153 @@
		}
	}

	if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() {
		err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind())
		return
	}

	switch dst.Kind() {
	case reflect.Struct:
		if hasExportedField(dst) {
			dstCp := reflect.New(dst.Type()).Elem()
		if hasMergeableFields(dst) {
			for i, n := 0, dst.NumField(); i < n; i++ {
				dstField := dst.Field(i)
				structField := dst.Type().Field(i)
				// copy un-exported struct fields
				if !isExportedComponent(&structField) {
					rf := dstCp.Field(i)
					rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec
					dstRF := dst.Field(i)
					if !dst.Field(i).CanAddr() {
						continue
					}

					dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec
					rf.Set(dstRF)
					continue
				}
				dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config)
				if err != nil {
				if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
					return
				}
				dstCp.Field(i).Set(dstField)
			}

			if dst.CanSet() {
				dst.Set(dstCp)
			} else {
				dst = dstCp
			}
			return
		} else {
			if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
				dst = src
				dst.Set(src)
			}
		}

	case reflect.Map:
		if dst.IsNil() && !src.IsNil() {
			if dst.CanSet() {
				dst.Set(reflect.MakeMap(dst.Type()))
			} else {
				dst = src
				return
			}
			dst.Set(reflect.MakeMap(dst.Type()))
		}

		if src.Kind() != reflect.Map {
			if overwrite {
				dst.Set(src)
			}
			return
		}

		for _, key := range src.MapKeys() {
			srcElement := src.MapIndex(key)
			dstElement := dst.MapIndex(key)
			if !srcElement.IsValid() {
				continue
			}
			if dst.MapIndex(key).IsValid() {
				k := dstElement.Interface()
				dstElement = reflect.ValueOf(k)
			}
			if isReflectNil(srcElement) {
				if overwrite || isReflectNil(dstElement) {
					dst.SetMapIndex(key, srcElement)
			dstElement := dst.MapIndex(key)
			switch srcElement.Kind() {
			case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
				if srcElement.IsNil() {
					if overwrite {
						dst.SetMapIndex(key, srcElement)
					}
					continue
				}
				fallthrough
			default:
				if !srcElement.CanInterface() {
					continue
				}
				switch reflect.TypeOf(srcElement.Interface()).Kind() {
				case reflect.Struct:
					fallthrough
				case reflect.Ptr:
					fallthrough
				case reflect.Map:
					srcMapElm := srcElement
					dstMapElm := dstElement
					if srcMapElm.CanInterface() {
						srcMapElm = reflect.ValueOf(srcMapElm.Interface())
						if dstMapElm.IsValid() {
							dstMapElm = reflect.ValueOf(dstMapElm.Interface())
						}
					}
					if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
						return
					}
				case reflect.Slice:
					srcSlice := reflect.ValueOf(srcElement.Interface())

					var dstSlice reflect.Value
					if !dstElement.IsValid() || dstElement.IsNil() {
						dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
					} else {
						dstSlice = reflect.ValueOf(dstElement.Interface())
					}

					if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
						if typeCheck && srcSlice.Type() != dstSlice.Type() {
							return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
						}
						dstSlice = srcSlice
					} else if config.AppendSlice {
						if srcSlice.Type() != dstSlice.Type() {
							return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
						}
						dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
					} else if sliceDeepCopy {
						i := 0
						for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
							srcElement := srcSlice.Index(i)
							dstElement := dstSlice.Index(i)

							if srcElement.CanInterface() {
								srcElement = reflect.ValueOf(srcElement.Interface())
							}
							if dstElement.CanInterface() {
								dstElement = reflect.ValueOf(dstElement.Interface())
							}

							if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
								return
							}
						}

					}
					dst.SetMapIndex(key, dstSlice)
				}
				continue
			}
			if !srcElement.CanInterface() {
			if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
				continue
			}

			if srcElement.CanInterface() {
				srcElement = reflect.ValueOf(srcElement.Interface())
				if dstElement.IsValid() {
					dstElement = reflect.ValueOf(dstElement.Interface())
			if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
				if dst.IsNil() {
					dst.Set(reflect.MakeMap(dst.Type()))
				}
				dst.SetMapIndex(key, srcElement)
			}
			dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config)
			if err != nil {
				return
			}
			dst.SetMapIndex(key, dstElement)

		}
	case reflect.Slice:
		newSlice := dst
		if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
			if typeCheck && src.Type() != dst.Type() {
				return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type())
			}
			newSlice = src
		} else if config.AppendSlice {
			if typeCheck && src.Type() != dst.Type() {
				err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
				return
			}
			newSlice = reflect.AppendSlice(dst, src)
		}
		if dst.CanSet() {
			dst.Set(newSlice)
		} else {
			dst = newSlice
		}
	case reflect.Ptr, reflect.Interface:
		if isReflectNil(src) {
		if !dst.CanSet() {
			break
		}

		if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
			if dst.IsNil() || overwrite {
				if overwrite || isEmptyValue(dst) {
					if dst.CanSet() {
						dst.Set(src)
					} else {
						dst = src
					}
		if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
			dst.Set(src)
		} else if config.AppendSlice {
			if src.Type() != dst.Type() {
				return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
			}
			dst.Set(reflect.AppendSlice(dst, src))
		} else if sliceDeepCopy {
			for i := 0; i < src.Len() && i < dst.Len(); i++ {
				srcElement := src.Index(i)
				dstElement := dst.Index(i)
				if srcElement.CanInterface() {
					srcElement = reflect.ValueOf(srcElement.Interface())
				}
				if dstElement.CanInterface() {
					dstElement = reflect.ValueOf(dstElement.Interface())
				}

				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
					return
				}
			}
		}
	case reflect.Ptr:
		fallthrough
	case reflect.Interface:
		if isReflectNil(src) {
			if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
				dst.Set(src)
			}
			break
		}
@@ -214,33 +243,35 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
				dst.Set(src)
			}
		} else if src.Kind() == reflect.Ptr {
			if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
			if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
				return
			}
			dst = dst.Addr()
		} else if dst.Elem().Type() == src.Type() {
			if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
			if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
				return
			}
		} else {
			return dst, ErrDifferentArgumentsTypes
			return ErrDifferentArgumentsTypes
		}
		break
		}

		if dst.IsNil() || overwrite {
		if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) {
			if dst.CanSet() {
				dst.Set(src)
			} else {
				dst = src
			}
			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
				dst.Set(src)
			}
		} else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
			return
			break
		}

		if dst.Elem().Kind() == src.Elem().Kind() {
			if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
				return
			}
			break
		}
	default:
		overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst))
		if overwriteFull {
		mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
		if mustSet {
			if dst.CanSet() {
				dst.Set(src)
			} else {
@@ -281,6 +312,7 @@ func WithOverride(config *Config) {

// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
func WithOverwriteWithEmptyValue(config *Config) {
	config.Overwrite = true
	config.overwriteWithEmptyValue = true
}

@@ -299,7 +331,16 @@ func WithTypeCheck(config *Config) {
	config.TypeCheck = true
}

// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
func WithSliceDeepCopy(config *Config) {
	config.sliceDeepCopy = true
	config.Overwrite = true
}

func merge(dst, src interface{}, opts ...func(*Config)) error {
	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
		return ErrNonPointerAgument
	}
	var (
		vDst, vSrc reflect.Value
		err        error
@@ -314,14 +355,10 @@ func merge(dst, src interface{}, opts ...func(*Config)) error {
	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
		return err
	}
	if !vDst.CanSet() {
		return fmt.Errorf("cannot set dst, needs reference")
	}
	if vDst.Type() != vSrc.Type() {
		return ErrDifferentArgumentsTypes
	}
	_, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
	return err
	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}

// IsReflectNil is the reflect value provided nil

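Among the merge.go changes, this release introduces WithSliceDeepCopy, which merges slices element by element with Overwrite set, in contrast to WithAppendSlice or the default replace-only-if-empty behavior. A quick sketch contrasting the three (the Box type is illustrative):

```go
// Sketch contrasting slice handling in mergo v0.3.11, including the
// WithSliceDeepCopy option added in the diff above.
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Box struct {
	Items []string
}

func main() {
	// Default: a non-empty dst slice is left alone.
	a := Box{Items: []string{"x"}}
	mergo.Merge(&a, Box{Items: []string{"y", "z"}})
	fmt.Println(a.Items) // [x]

	// WithAppendSlice: src elements are appended to dst.
	b := Box{Items: []string{"x"}}
	mergo.Merge(&b, Box{Items: []string{"y", "z"}}, mergo.WithAppendSlice)
	fmt.Println(b.Items) // [x y z]

	// WithSliceDeepCopy: overlapping indexes are merged one by one with
	// Overwrite, so dst keeps its length but takes src's values.
	c := Box{Items: []string{"x"}}
	mergo.Merge(&c, Box{Items: []string{"y", "z"}}, mergo.WithSliceDeepCopy)
	fmt.Println(c.Items) // [y]
}
```
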
21 vendor/github.com/imdario/mergo/mergo.go generated vendored
@@ -20,6 +20,7 @@ var (
	ErrNotSupported                = errors.New("only structs and maps are supported")
	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
	ErrNonPointerAgument           = errors.New("dst must be a pointer")
)

// During deepMerge, must keep track of checks that are
@@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
	}
	return
}

// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
	if dst.CanAddr() {
		addr := dst.UnsafeAddr()
		h := 17 * addr
		seen := visited[h]
		typ := dst.Type()
		for p := seen; p != nil; p = p.next {
			if p.ptr == addr && p.typ == typ {
				return nil
			}
		}
		// Remember, remember...
		visited[h] = &visit{addr, typ, seen}
	}
	return // TODO refactor
}

14 vendor/github.com/mattn/go-isatty/.travis.yml generated vendored
@@ -1,14 +0,0 @@
language: go
sudo: false
go:
  - 1.13.x
  - tip

before_install:
  - go get -t -v ./...

script:
  - ./go.test.sh

after_success:
  - bash <(curl -s https://codecov.io/bash)
9 vendor/github.com/mattn/go-isatty/LICENSE generated vendored
@@ -1,9 +0,0 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>

MIT License (Expat)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
50 vendor/github.com/mattn/go-isatty/README.md generated vendored
@@ -1,50 +0,0 @@
# go-isatty

[](http://godoc.org/github.com/mattn/go-isatty)
[](https://codecov.io/gh/mattn/go-isatty)
[](https://coveralls.io/github/mattn/go-isatty?branch=master)
[](https://goreportcard.com/report/mattn/go-isatty)

isatty for golang

## Usage

```go
package main

import (
	"fmt"
	"github.com/mattn/go-isatty"
	"os"
)

func main() {
	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Println("Is Terminal")
	} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
		fmt.Println("Is Cygwin/MSYS2 Terminal")
	} else {
		fmt.Println("Is Not Terminal")
	}
}
```

## Installation

```
$ go get github.com/mattn/go-isatty
```

## License

MIT

## Author

Yasuhiro Matsumoto (a.k.a mattn)

## Thanks

* k-takata: base idea for IsCygwinTerminal

https://github.com/k-takata/go-iscygpty
2 vendor/github.com/mattn/go-isatty/doc.go generated vendored
@@ -1,2 +0,0 @@
// Package isatty implements interface to isatty
package isatty
5 vendor/github.com/mattn/go-isatty/go.mod generated vendored
@@ -1,5 +0,0 @@
module github.com/mattn/go-isatty

go 1.12

require golang.org/x/sys v0.0.0-20200116001909-b77594299b42
2 vendor/github.com/mattn/go-isatty/go.sum generated vendored
@@ -1,2 +0,0 @@
golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
12 vendor/github.com/mattn/go-isatty/go.test.sh generated vendored
@@ -1,12 +0,0 @@
#!/usr/bin/env bash

set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
	go test -race -coverprofile=profile.out -covermode=atomic "$d"
	if [ -f profile.out ]; then
		cat profile.out >> coverage.txt
		rm profile.out
	fi
done
18 vendor/github.com/mattn/go-isatty/isatty_bsd.go generated vendored
@@ -1,18 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package isatty

import "golang.org/x/sys/unix"

// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
	_, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
	return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
15 vendor/github.com/mattn/go-isatty/isatty_others.go generated vendored
@@ -1,15 +0,0 @@
// +build appengine js nacl

package isatty

// IsTerminal returns true if the file descriptor is terminal which
// is always false on js and appengine classic which is a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
	return false
}

// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
22 vendor/github.com/mattn/go-isatty/isatty_plan9.go generated vendored
@@ -1,22 +0,0 @@
// +build plan9

package isatty

import (
	"syscall"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	path, err := syscall.Fd2path(int(fd))
	if err != nil {
		return false
	}
	return path == "/dev/cons" || path == "/mnt/term/dev/cons"
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
22 vendor/github.com/mattn/go-isatty/isatty_solaris.go generated vendored
@@ -1,22 +0,0 @@
// +build solaris
// +build !appengine

package isatty

import (
	"golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
func IsTerminal(fd uintptr) bool {
	var termio unix.Termio
	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
	return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
18 vendor/github.com/mattn/go-isatty/isatty_tcgets.go generated vendored
@@ -1,18 +0,0 @@
// +build linux aix
// +build !appengine

package isatty

import "golang.org/x/sys/unix"

// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
	_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
	return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
125 vendor/github.com/mattn/go-isatty/isatty_windows.go generated vendored
@@ -1,125 +0,0 @@
// +build windows
// +build !appengine

package isatty

import (
	"errors"
	"strings"
	"syscall"
	"unicode/utf16"
	"unsafe"
)

const (
	objectNameInfo uintptr = 1
	fileNameInfo           = 2
	fileTypePipe           = 3
)

var (
	kernel32                         = syscall.NewLazyDLL("kernel32.dll")
	ntdll                            = syscall.NewLazyDLL("ntdll.dll")
	procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
	procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
	procGetFileType                  = kernel32.NewProc("GetFileType")
	procNtQueryObject                = ntdll.NewProc("NtQueryObject")
)

func init() {
	// Check if GetFileInformationByHandleEx is available.
	if procGetFileInformationByHandleEx.Find() != nil {
		procGetFileInformationByHandleEx = nil
	}
}

// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
	var st uint32
	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
	return r != 0 && e == 0
}

// Check pipe name is used for cygwin/msys2 pty.
// Cygwin/MSYS2 PTY has a name like:
//   \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
func isCygwinPipeName(name string) bool {
	token := strings.Split(name, "-")
	if len(token) < 5 {
		return false
	}

	if token[0] != `\msys` &&
		token[0] != `\cygwin` &&
		token[0] != `\Device\NamedPipe\msys` &&
		token[0] != `\Device\NamedPipe\cygwin` {
		return false
	}

	if token[1] == "" {
		return false
	}

	if !strings.HasPrefix(token[2], "pty") {
		return false
	}

	if token[3] != `from` && token[3] != `to` {
		return false
	}

	if token[4] != "master" {
		return false
	}

	return true
}

// getFileNameByHandle uses the undocumented ntdll NtQueryObject to get the file's full name from its handle.
// Since GetFileInformationByHandleEx is not available before Windows Vista and some users
// are still on Windows XP, this is a workaround for them; it also works on systems from
// Windows Vista to 10.
// see https://stackoverflow.com/a/18792477 for details
func getFileNameByHandle(fd uintptr) (string, error) {
	if procNtQueryObject == nil {
		return "", errors.New("ntdll.dll: NtQueryObject not supported")
	}

	var buf [4 + syscall.MAX_PATH]uint16
	var result int
	r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
		fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
	if r != 0 {
		return "", e
	}
	return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
}

// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal.
func IsCygwinTerminal(fd uintptr) bool {
	if procGetFileInformationByHandleEx == nil {
		name, err := getFileNameByHandle(fd)
		if err != nil {
			return false
		}
		return isCygwinPipeName(name)
	}

	// Cygwin/msys's pty is a pipe.
	ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
	if ft != fileTypePipe || e != 0 {
		return false
	}

	var buf [2 + syscall.MAX_PATH]uint16
	r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
		4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
		uintptr(len(buf)*2), 0, 0)
	if r == 0 || e != 0 {
		return false
	}

	l := *(*uint32)(unsafe.Pointer(&buf))
	return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
}
8 vendor/github.com/mattn/go-isatty/renovate.json (generated, vendored)
@ -1,8 +0,0 @@
{
  "extends": [
    "config:base"
  ],
  "postUpdateOptions": [
    "gomodTidy"
  ]
}
5 vendor/github.com/ulikunitz/xz/TODO.md (generated, vendored)
@ -90,6 +90,11 @@

## Log

### 2020-08-19

Release v0.5.8 fixes
[issue #35](https://github.com/ulikunitz/xz/issues/35).

### 2020-02-24

Release v0.5.7 supports the check-ID None and fixes
7 vendor/github.com/ulikunitz/xz/bits.go (generated, vendored)
@ -54,6 +54,8 @@ var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")

// readUvarint reads a uvarint from the given byte reader.
func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
	const maxUvarintLen = 10

	var s uint
	i := 0
	for {
@ -62,8 +64,11 @@ func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
			return x, i, err
		}
		i++
		if i > maxUvarintLen {
			return x, i, errOverflowU64
		}
		if b < 0x80 {
			if i > 10 || i == 10 && b > 1 {
			if i == maxUvarintLen && b > 1 {
				return x, i, errOverflowU64
			}
			return x | uint64(b)<<s, i, nil
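The rewritten readUvarint bounds the loop with `maxUvarintLen = 10` on every iteration, rather than checking `i > 10` only when a terminating byte finally arrives, so a stream of continuation bytes can no longer spin past ten iterations. The bound comes from the encoding itself: 7 payload bits per byte means a 64-bit value needs at most ceil(64/7) = 10 bytes, and in the tenth byte only the low bit may be set. A small standalone sketch using the standard library's equivalent (`encoding/binary`, whose `MaxVarintLen64` is the same 10):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// MaxVarintLen64 == 10: the same ceiling the patched readUvarint
	// enforces via maxUvarintLen.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, ^uint64(0)) // encode math.MaxUint64
	fmt.Println(n)                          // 10

	x, err := binary.ReadUvarint(bytes.NewReader(buf[:n]))
	fmt.Println(x, err) // 18446744073709551615 <nil>
}
```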
28 vendor/github.com/vbauerster/mpb/v5/bar.go (generated, vendored)
@ -86,7 +86,7 @@ func newBar(container *Progress, bs *bState) *Bar {
		noPop:        bs.noPop,
		operateState: make(chan func(*bState)),
		frameCh:      make(chan io.Reader, 1),
		syncTableCh:  make(chan [][]chan int),
		syncTableCh:  make(chan [][]chan int, 1),
		completed:    make(chan bool, 1),
		done:         make(chan struct{}),
		cancel:       cancel,
@ -132,14 +132,18 @@ func (b *Bar) Current() int64 {
// Given default bar style is "[=>-]<+", refill rune is '+'.
// To set bar style use mpb.BarStyle(string) BarOption.
func (b *Bar) SetRefill(amount int64) {
	b.operateState <- func(s *bState) {
	select {
	case b.operateState <- func(s *bState) {
		s.refill = amount
	}:
	case <-b.done:
	}
}

// TraverseDecorators traverses all available decorators and calls cb func on each.
func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
	b.operateState <- func(s *bState) {
	select {
	case b.operateState <- func(s *bState) {
		for _, decorators := range [...][]decor.Decorator{
			s.pDecorators,
			s.aDecorators,
@ -148,6 +152,8 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
			cb(extractBaseDecorator(d))
		}
	}
	}:
	case <-b.done:
	}
}

@ -174,6 +180,7 @@ func (b *Bar) SetTotal(total int64, complete bool) {
}

// SetCurrent sets progress' current to an arbitrary value.
// Setting a negative value will cause a panic.
func (b *Bar) SetCurrent(current int64) {
	select {
	case b.operateState <- func(s *bState) {
@ -305,11 +312,13 @@ func (b *Bar) render(tw int) {
	defer func() {
		// recovering if user defined decorator panics for example
		if p := recover(); p != nil {
			s.extender = makePanicExtender(p)
			if b.recoveredPanic == nil {
				s.extender = makePanicExtender(p)
				b.toShutdown = !b.toShutdown
				b.recoveredPanic = p
			}
			frame, lines := s.extender(nil, s.reqWidth, stat)
			b.extendedLines = lines
			b.toShutdown = !b.toShutdown
			b.recoveredPanic = p
			b.frameCh <- frame
			b.dlogger.Println(p)
		}
@ -348,12 +357,15 @@ func (b *Bar) subscribeDecorators() {
			shutdownListeners = append(shutdownListeners, d)
		}
	})
	b.operateState <- func(s *bState) {
	select {
	case b.operateState <- func(s *bState) {
		s.averageDecorators = averageDecorators
		s.ewmaDecorators = ewmaDecorators
		s.shutdownListeners = shutdownListeners
	}:
	b.hasEwmaDecorators = len(ewmaDecorators) != 0
	case <-b.done:
	}
	b.hasEwmaDecorators = len(ewmaDecorators) != 0
}

func (b *Bar) refreshTillShutdown() {
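Each of these bar.go changes replaces a bare channel send with a two-case select on the bar's done channel, so callers such as SetRefill, TraverseDecorators, and subscribeDecorators can no longer block forever once the bar has shut down. A minimal sketch of the pattern, with hypothetical `worker`/`requests` names standing in for Bar and operateState:

```go
package main

import (
	"fmt"
	"time"
)

// worker owns a request channel and a done channel. Once done is
// closed, senders must not block forever on requests; the two-case
// select makes every send abort cleanly after shutdown.
type worker struct {
	requests chan func()
	done     chan struct{}
}

func (w *worker) do(f func()) {
	select {
	case w.requests <- f:
	case <-w.done: // after shutdown the call cannot block
	}
}

func main() {
	w := &worker{requests: make(chan func()), done: make(chan struct{})}
	go func() {
		for {
			select {
			case f := <-w.requests:
				f()
			case <-w.done:
				return
			}
		}
	}()
	w.do(func() { fmt.Println("handled") })
	close(w.done)
	time.Sleep(10 * time.Millisecond)
	w.do(func() { fmt.Println("dropped") }) // returns without blocking
}
```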
7 vendor/github.com/vbauerster/mpb/v5/cwriter/util_bsd.go (generated, vendored, new file)
@ -0,0 +1,7 @@
// +build darwin dragonfly freebsd netbsd openbsd

package cwriter

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TIOCGETA
7 vendor/github.com/vbauerster/mpb/v5/cwriter/util_linux.go (generated, vendored, new file)
@ -0,0 +1,7 @@
// +build aix linux

package cwriter

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETS
7 vendor/github.com/vbauerster/mpb/v5/cwriter/util_solaris.go (generated, vendored, new file)
@ -0,0 +1,7 @@
// +build solaris

package cwriter

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETA
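These three new files isolate the one OS-specific ingredient of a termios-based TTY check: the ioctl request number (TCGETS on Linux/AIX, TIOCGETA on the BSDs, TCGETA on Solaris). They feed the IsTerminal implementation in writer_posix.go below. A hedged standalone sketch of how such a constant is used, hardcoding the Linux value:

```go
// +build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// isTerminal reports whether fd refers to a terminal by asking the
// kernel for its termios state; only a TTY answers TCGETS successfully.
// On the BSDs the request would be unix.TIOCGETA and on Solaris
// unix.TCGETA, which is exactly what ioctlReadTermios selects per OS.
func isTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	return err == nil
}

func main() {
	fmt.Println(isTerminal(int(os.Stdout.Fd())))
}
```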
28 vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go (generated, vendored)
@ -3,17 +3,19 @@ package cwriter

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/mattn/go-isatty"
	"strconv"
)

// NotATTY not a TeleTYpewriter error.
var NotATTY = errors.New("not a terminal")

var cuuAndEd = fmt.Sprintf("%c[%%dA%[1]c[J", 27)
// http://ascii-table.com/ansi-escape-sequences.php
const (
	escOpen  = "\x1b["
	cuuAndEd = "A\x1b[J"
)

// Writer is a buffered writer that updates the terminal. The
// contents of writer will be flushed when Flush is called.
@ -21,7 +23,7 @@ type Writer struct {
	out        io.Writer
	buf        bytes.Buffer
	lineCount  int
	fd         uintptr
	fd         int
	isTerminal bool
}

@ -29,8 +31,8 @@ type Writer struct {
func New(out io.Writer) *Writer {
	w := &Writer{out: out}
	if f, ok := out.(*os.File); ok {
		w.fd = f.Fd()
		w.isTerminal = isatty.IsTerminal(w.fd)
		w.fd = int(f.Fd())
		w.isTerminal = IsTerminal(w.fd)
	}
	return w
}
@ -39,7 +41,10 @@ func New(out io.Writer) *Writer {
func (w *Writer) Flush(lineCount int) (err error) {
	// some terminals interpret clear 0 lines as clear 1
	if w.lineCount > 0 {
		w.clearLines()
		err = w.clearLines()
		if err != nil {
			return
		}
	}
	w.lineCount = lineCount
	_, err = w.buf.WriteTo(w.out)
@ -70,3 +75,10 @@ func (w *Writer) GetWidth() (int, error) {
	tw, _, err := GetSize(w.fd)
	return tw, err
}

func (w *Writer) ansiCuuAndEd() (err error) {
	buf := make([]byte, 8)
	buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lineCount), 10)
	_, err = w.out.Write(append(buf, cuuAndEd...))
	return
}
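The new escOpen/cuuAndEd constants spell out the two ANSI sequences ansiCuuAndEd writes: ESC[nA (cursor up n lines, CUU) followed by ESC[J (erase from cursor to end of screen, ED), replacing the old fmt.Sprintf-built format string. A tiny illustrative program that redraws two lines in place with the same escapes (run it in a real terminal):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	for i := 0; i < 5; i++ {
		if i > 0 {
			fmt.Print("\x1b[2A\x1b[J") // cursor up 2 lines, erase below
		}
		fmt.Printf("frame: %d\nstatus: ok\n", i)
		time.Sleep(300 * time.Millisecond)
	}
}
```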
16 vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go (generated, vendored)
@ -3,20 +3,24 @@
package cwriter

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func (w *Writer) clearLines() {
	fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
func (w *Writer) clearLines() error {
	return w.ansiCuuAndEd()
}

// GetSize returns the dimensions of the given terminal.
func GetSize(fd uintptr) (width, height int, err error) {
	ws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
func GetSize(fd int) (width, height int, err error) {
	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
	if err != nil {
		return -1, -1, err
	}
	return int(ws.Col), int(ws.Row), nil
}

// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
	return err == nil
}
88 vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go (generated, vendored)
@ -3,67 +3,71 @@
package cwriter

import (
	"fmt"
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var kernel32 = windows.NewLazySystemDLL("kernel32.dll")

var (
	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
	procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
)

type coord struct {
	x int16
	y int16
}

type smallRect struct {
	left   int16
	top    int16
	right  int16
	bottom int16
}

type consoleScreenBufferInfo struct {
	size              coord
	cursorPosition    coord
	attributes        uint16
	window            smallRect
	maximumWindowSize coord
}

func (w *Writer) clearLines() {
func (w *Writer) clearLines() error {
	if !w.isTerminal {
		fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
		// hope it's cygwin or similar
		return w.ansiCuuAndEd()
	}

	info := new(consoleScreenBufferInfo)
	procGetConsoleScreenBufferInfo.Call(w.fd, uintptr(unsafe.Pointer(info)))

	info.cursorPosition.y -= int16(w.lineCount)
	if info.cursorPosition.y < 0 {
		info.cursorPosition.y = 0
	var info windows.ConsoleScreenBufferInfo
	if err := windows.GetConsoleScreenBufferInfo(windows.Handle(w.fd), &info); err != nil {
		return err
	}
	procSetConsoleCursorPosition.Call(w.fd, uintptr(uint32(uint16(info.cursorPosition.y))<<16|uint32(uint16(info.cursorPosition.x))))

	info.CursorPosition.Y -= int16(w.lineCount)
	if info.CursorPosition.Y < 0 {
		info.CursorPosition.Y = 0
	}
	_, _, _ = procSetConsoleCursorPosition.Call(
		uintptr(w.fd),
		uintptr(uint32(uint16(info.CursorPosition.Y))<<16|uint32(uint16(info.CursorPosition.X))),
	)

	// clear the lines
	cursor := &coord{
		x: info.window.left,
		y: info.cursorPosition.y,
	cursor := &windows.Coord{
		X: info.Window.Left,
		Y: info.CursorPosition.Y,
	}
	count := uint32(info.size.x) * uint32(w.lineCount)
	procFillConsoleOutputCharacter.Call(w.fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(cursor)), uintptr(unsafe.Pointer(new(uint32))))
	count := uint32(info.Size.X) * uint32(w.lineCount)
	_, _, _ = procFillConsoleOutputCharacter.Call(
		uintptr(w.fd),
		uintptr(' '),
		uintptr(count),
		*(*uintptr)(unsafe.Pointer(cursor)),
		uintptr(unsafe.Pointer(new(uint32))),
	)
	return nil
}

// GetSize returns the visible dimensions of the given terminal.
//
// These dimensions don't include any scrollback buffer height.
func GetSize(fd uintptr) (width, height int, err error) {
	info := new(consoleScreenBufferInfo)
	procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(info)))
	return int(info.window.right - info.window.left), int(info.window.bottom - info.window.top), nil
func GetSize(fd int) (width, height int, err error) {
	var info windows.ConsoleScreenBufferInfo
	if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
		return 0, 0, err
	}
	// terminal.GetSize from crypto/ssh adds "+ 1" to both width and height:
	// https://go.googlesource.com/crypto/+/refs/heads/release-branch.go1.14/ssh/terminal/util_windows.go#75
	// but it looks like this was the root cause of issue #66, so removing both "+ 1" has fixed it.
	return int(info.Window.Right - info.Window.Left), int(info.Window.Bottom - info.Window.Top), nil
}

// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
	var st uint32
	err := windows.GetConsoleMode(windows.Handle(fd), &st)
	return err == nil
}
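The Windows writer now uses windows.GetConsoleScreenBufferInfo from golang.org/x/sys/windows instead of hand-rolled struct definitions, and GetSize deliberately drops the "+ 1" that crypto/ssh's terminal.GetSize adds. A sketch of the same size query, assuming a real console on stdout:

```go
// +build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

// Query the visible console window size the way the patched GetSize
// does: Right-Left and Bottom-Top, without the historical "+ 1" that
// the mpb change links to its issue #66.
func main() {
	var info windows.ConsoleScreenBufferInfo
	h := windows.Handle(os.Stdout.Fd())
	if err := windows.GetConsoleScreenBufferInfo(h, &info); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	width := int(info.Window.Right - info.Window.Left)
	height := int(info.Window.Bottom - info.Window.Top)
	fmt.Println(width, height)
}
```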
216 vendor/github.com/vbauerster/mpb/v5/decor/counters.go (generated, vendored)
@ -2,6 +2,7 @@ package decor

import (
	"fmt"
	"strings"
)

const (
@ -31,7 +32,7 @@ func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
//
//	`unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
//	`pairFmt` printf compatible verbs for current and total, like "%f" or "%d"
//	`pairFmt` printf compatible verbs for current and total pair
//
//	`wcc` optional WC config
//
@ -43,25 +44,200 @@ func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
//	pairFmt="% d / % d" output: "1 MB / 12 MB"
//
func Counters(unit int, pairFmt string, wcc ...WC) Decorator {
	return Any(chooseSizeProducer(unit, pairFmt), wcc...)
	producer := func(unit int, pairFmt string) DecorFunc {
		if pairFmt == "" {
			pairFmt = "%d / %d"
		} else if strings.Count(pairFmt, "%") != 2 {
			panic("expected pairFmt with exactly 2 verbs")
		}
		switch unit {
		case UnitKiB:
			return func(s Statistics) string {
				return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total))
			}
		case UnitKB:
			return func(s Statistics) string {
				return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total))
			}
		default:
			return func(s Statistics) string {
				return fmt.Sprintf(pairFmt, s.Current, s.Total)
			}
		}
	}
	return Any(producer(unit, pairFmt), wcc...)
}

func chooseSizeProducer(unit int, format string) DecorFunc {
	if format == "" {
		format = "%d / %d"
	}
	switch unit {
	case UnitKiB:
		return func(s Statistics) string {
			return fmt.Sprintf(format, SizeB1024(s.Current), SizeB1024(s.Total))
		}
	case UnitKB:
		return func(s Statistics) string {
			return fmt.Sprintf(format, SizeB1000(s.Current), SizeB1000(s.Total))
		}
	default:
		return func(s Statistics) string {
			return fmt.Sprintf(format, s.Current, s.Total)
		}
	}

// TotalNoUnit is a wrapper around Total with no unit param.
func TotalNoUnit(format string, wcc ...WC) Decorator {
	return Total(0, format, wcc...)
}

// TotalKibiByte is a wrapper around Total with predefined unit
// UnitKiB (bytes/1024).
func TotalKibiByte(format string, wcc ...WC) Decorator {
	return Total(UnitKiB, format, wcc...)
}

// TotalKiloByte is a wrapper around Total with predefined unit
// UnitKB (bytes/1000).
func TotalKiloByte(format string, wcc ...WC) Decorator {
	return Total(UnitKB, format, wcc...)
}

// Total decorator with dynamic unit measure adjustment.
//
//	`unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
//	`format` printf compatible verb for Total
//
//	`wcc` optional WC config
//
// format example if unit=UnitKiB:
//
//	format="%.1f" output: "12.0MiB"
//	format="% .1f" output: "12.0 MiB"
//	format="%d" output: "12MiB"
//	format="% d" output: "12 MiB"
//
func Total(unit int, format string, wcc ...WC) Decorator {
	producer := func(unit int, format string) DecorFunc {
		if format == "" {
			format = "%d"
		} else if strings.Count(format, "%") != 1 {
			panic("expected format with exactly 1 verb")
		}

		switch unit {
		case UnitKiB:
			return func(s Statistics) string {
				return fmt.Sprintf(format, SizeB1024(s.Total))
			}
		case UnitKB:
			return func(s Statistics) string {
				return fmt.Sprintf(format, SizeB1000(s.Total))
			}
		default:
			return func(s Statistics) string {
				return fmt.Sprintf(format, s.Total)
			}
		}
	}
	return Any(producer(unit, format), wcc...)
}

// CurrentNoUnit is a wrapper around Current with no unit param.
func CurrentNoUnit(format string, wcc ...WC) Decorator {
	return Current(0, format, wcc...)
}

// CurrentKibiByte is a wrapper around Current with predefined unit
// UnitKiB (bytes/1024).
func CurrentKibiByte(format string, wcc ...WC) Decorator {
	return Current(UnitKiB, format, wcc...)
}

// CurrentKiloByte is a wrapper around Current with predefined unit
// UnitKB (bytes/1000).
func CurrentKiloByte(format string, wcc ...WC) Decorator {
	return Current(UnitKB, format, wcc...)
}

// Current decorator with dynamic unit measure adjustment.
//
//	`unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
//	`format` printf compatible verb for Current
//
//	`wcc` optional WC config
//
// format example if unit=UnitKiB:
//
//	format="%.1f" output: "12.0MiB"
//	format="% .1f" output: "12.0 MiB"
//	format="%d" output: "12MiB"
//	format="% d" output: "12 MiB"
//
func Current(unit int, format string, wcc ...WC) Decorator {
	producer := func(unit int, format string) DecorFunc {
		if format == "" {
			format = "%d"
		} else if strings.Count(format, "%") != 1 {
			panic("expected format with exactly 1 verb")
		}

		switch unit {
		case UnitKiB:
			return func(s Statistics) string {
				return fmt.Sprintf(format, SizeB1024(s.Current))
			}
		case UnitKB:
			return func(s Statistics) string {
				return fmt.Sprintf(format, SizeB1000(s.Current))
			}
		default:
			return func(s Statistics) string {
				return fmt.Sprintf(format, s.Current)
			}
		}
	}
	return Any(producer(unit, format), wcc...)
}

// InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param.
func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator {
	return InvertedCurrent(0, format, wcc...)
}

// InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit
// UnitKiB (bytes/1024).
func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator {
	return InvertedCurrent(UnitKiB, format, wcc...)
}

// InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit
// UnitKB (bytes/1000).
func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator {
	return InvertedCurrent(UnitKB, format, wcc...)
}

// InvertedCurrent decorator with dynamic unit measure adjustment.
//
//	`unit` one of [0|UnitKiB|UnitKB] zero for no unit
//
//	`format` printf compatible verb for InvertedCurrent
//
//	`wcc` optional WC config
//
// format example if unit=UnitKiB:
//
//	format="%.1f" output: "12.0MiB"
//	format="% .1f" output: "12.0 MiB"
//	format="%d" output: "12MiB"
//	format="% d" output: "12 MiB"
//
func InvertedCurrent(unit int, format string, wcc ...WC) Decorator {
	producer := func(unit int, format string) DecorFunc {
		if format == "" {
			format = "%d"
		} else if strings.Count(format, "%") != 1 {
			panic("expected format with exactly 1 verb")
		}

		switch unit {
		case UnitKiB:
			return func(s Statistics) string {
				return fmt.Sprintf(format, SizeB1024(s.Total-s.Current))
			}
		case UnitKB:
			return func(s Statistics) string {
				return fmt.Sprintf(format, SizeB1000(s.Total-s.Current))
			}
		default:
			return func(s Statistics) string {
				return fmt.Sprintf(format, s.Total-s.Current)
			}
		}
	}
	return Any(producer(unit, format), wcc...)
}
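The rewritten producers validate the verb count up front, panicking on a malformed pairFmt/format instead of silently printing garbage, and the file gains Total, Current, and InvertedCurrent decorator families. A hedged usage sketch against mpb v5's public API, showing how the format verbs drive the rendered text:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
	"github.com/vbauerster/mpb/v5/decor"
)

func main() {
	p := mpb.New()
	total := int64(12 * 1024 * 1024)
	bar := p.AddBar(total,
		mpb.PrependDecorators(
			// "% .1f / % .1f" renders e.g. "1.0 MiB / 12.0 MiB"
			decor.CountersKibiByte("% .1f / % .1f"),
		),
		mpb.AppendDecorators(
			// exactly one verb; more or fewer now panics
			decor.TotalKibiByte("% .1f"),
		),
	)
	for i := 0; i < 12; i++ {
		bar.IncrBy(1024 * 1024)
		time.Sleep(50 * time.Millisecond)
	}
	p.Wait()
}
```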
3 vendor/github.com/vbauerster/mpb/v5/go.mod (generated, vendored)
@ -3,9 +3,8 @@ module github.com/vbauerster/mpb/v5

require (
	github.com/VividCortex/ewma v1.1.1
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
	github.com/mattn/go-isatty v0.0.12
	github.com/mattn/go-runewidth v0.0.9
	golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
	golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
)

go 1.14
7 vendor/github.com/vbauerster/mpb/v5/go.sum (generated, vendored)
@ -2,10 +2,7 @@ github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdc
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
11 vendor/go.etcd.io/bbolt/README.md (generated, vendored)
@ -152,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.

Transactions should not depend on one another and generally shouldn't be opened
simultaneously in the same goroutine. This can cause a deadlock as the read-write
transaction needs to periodically re-map the data file but it cannot do so while
any read-only transaction is open. Even a nested read-only transaction can cause
a deadlock, as the child transaction can block the parent transaction from releasing
its resources.

#### Read-write transactions
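The README now warns that any two transactions opened in one goroutine, even nested read-only ones, can deadlock. A minimal sketch of the safe shape: one transaction at a time, via Update and View (the file name demo.db is illustrative):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One transaction at a time per goroutine; never open a second
	// transaction (even read-only) while this one is still open.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	err = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("%s\n", tx.Bucket([]byte("widgets")).Get([]byte("answer")))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```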
57 vendor/go.etcd.io/bbolt/freelist.go (generated, vendored)
@ -2,7 +2,6 @@ package bbolt

import (
	"fmt"
	"reflect"
	"sort"
	"unsafe"
)
@ -94,24 +93,8 @@ func (f *freelist) pending_count() int {
	return count
}

// copyallunsafe copies a list of all free ids and all pending ids in one sorted list.
// copyall copies a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer
	m := make(pgids, 0, f.pending_count())
	for _, txp := range f.pending {
		m = append(m, txp.ids...)
	}
	sort.Sort(m)
	fpgids := f.getFreePageIDs()
	sz := len(fpgids) + len(m)
	dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(dstptr),
		Len:  sz,
		Cap:  sz,
	}))
	mergepgids(dst, fpgids, m)
}

func (f *freelist) copyall(dst []pgid) {
	m := make(pgids, 0, f.pending_count())
	for _, txp := range f.pending {
@ -284,21 +267,23 @@ func (f *freelist) read(p *page) {
	}
	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	var idx, count uintptr = 0, uintptr(p.count)
	var idx, count = 0, int(p.count)
	if count == 0xFFFF {
		idx = 1
		count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))))
		c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
		count = int(c)
		if count < 0 {
			panic(fmt.Sprintf("leading element count %d overflows int", c))
		}
	}

	// Copy the list of page ids from the freelist.
	if count == 0 {
		f.ids = nil
	} else {
		ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
			Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)),
			Len:  int(count),
			Cap:  int(count),
		}))
		var ids []pgid
		data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
		unsafeSlice(unsafe.Pointer(&ids), data, count)

		// copy the ids, so we don't modify on the freelist page directly
		idsCopy := make([]pgid, count)
@ -331,16 +316,22 @@ func (f *freelist) write(p *page) error {

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	lenids := f.count()
	if lenids == 0 {
		p.count = uint16(lenids)
	} else if lenids < 0xFFFF {
		p.count = uint16(lenids)
		f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
	l := f.count()
	if l == 0 {
		p.count = uint16(l)
	} else if l < 0xFFFF {
		p.count = uint16(l)
		var ids []pgid
		data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
		unsafeSlice(unsafe.Pointer(&ids), data, l)
		f.copyall(ids)
	} else {
		p.count = 0xFFFF
		*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids)
		f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0))))
		var ids []pgid
		data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
		unsafeSlice(unsafe.Pointer(&ids), data, l+1)
		ids[0] = pgid(l)
		f.copyall(ids[1:])
	}

	return nil
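freelist.read and freelist.write keep bbolt's on-disk rule: a page's uint16 count field holds the element count, and the sentinel 0xFFFF means "the real length is stored as the first payload element". A self-contained sketch of that encoding, with plain uint64s standing in for pgids:

```go
package main

import "fmt"

// encode mirrors the freelist.write branch structure: small lists fit
// in the uint16 count; large lists set the 0xFFFF sentinel and prepend
// the real length to the payload.
func encode(ids []uint64) (count uint16, payload []uint64) {
	if len(ids) < 0xFFFF {
		return uint16(len(ids)), ids
	}
	payload = make([]uint64, 0, len(ids)+1)
	payload = append(payload, uint64(len(ids)))
	payload = append(payload, ids...)
	return 0xFFFF, payload
}

// decode mirrors freelist.read: on the sentinel, take the length from
// the first element and skip it.
func decode(count uint16, payload []uint64) []uint64 {
	if count == 0xFFFF {
		n := int(payload[0])
		return payload[1 : 1+n]
	}
	return payload[:count]
}

func main() {
	big := make([]uint64, 70000)
	c, p := encode(big)
	fmt.Println(c, len(decode(c, p))) // 65535 70000
}
```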
25 vendor/go.etcd.io/bbolt/node.go (generated, vendored)
@ -3,7 +3,6 @@ package bbolt
import (
	"bytes"
	"fmt"
	"reflect"
	"sort"
	"unsafe"
)
@ -208,36 +207,32 @@ func (n *node) write(p *page) {
	}

	// Loop over each item and write it to the page.
	bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
	// off tracks the offset into p of the start of the next data.
	off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
	for i, item := range n.inodes {
		_assert(len(item.key) > 0, "write: zero-length inode key")

		// Create a slice to write into of needed size and advance
		// byte pointer for next iteration.
		sz := len(item.key) + len(item.value)
		b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
		off += uintptr(sz)

		// Write the page element.
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.flags = item.flags
			elem.ksize = uint32(len(item.key))
			elem.vsize = uint32(len(item.value))
		} else {
			elem := p.branchPageElement(uint16(i))
			elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.ksize = uint32(len(item.key))
			elem.pgid = item.pgid
			_assert(elem.pgid != p.id, "write: circular dependency occurred")
		}

		// Create a slice to write into of needed size and advance
		// byte pointer for next iteration.
		klen, vlen := len(item.key), len(item.value)
		sz := klen + vlen
		b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
			Data: bp,
			Len:  sz,
			Cap:  sz,
		}))
		bp += uintptr(sz)

		// Write data for the element to the end of the page.
		l := copy(b, item.key)
		copy(b[l:], item.value)
57 vendor/go.etcd.io/bbolt/page.go (generated, vendored)
@ -3,7 +3,6 @@ package bbolt
import (
	"fmt"
	"os"
	"reflect"
	"sort"
	"unsafe"
)
@ -51,13 +50,13 @@ func (p *page) typ() string {

// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
	return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
	return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
}

// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
	off := uintptr(index) * unsafe.Sizeof(leafPageElement{})
	return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
	return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		leafPageElementSize, int(index)))
}

// leafPageElements retrieves a list of leaf nodes.
@ -65,17 +64,16 @@ func (p *page) leafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
		Len:  int(p.count),
		Cap:  int(p.count),
	}))
	var elems []leafPageElement
	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
	return elems
}

// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
	off := uintptr(index) * unsafe.Sizeof(branchPageElement{})
	return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
	return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		unsafe.Sizeof(branchPageElement{}), int(index)))
}

// branchPageElements retrieves a list of branch nodes.
@ -83,20 +81,15 @@ func (p *page) branchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
		Len:  int(p.count),
		Cap:  int(p.count),
	}))
	var elems []branchPageElement
	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
	return elems
}

// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
	buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(p)),
		Len:  n,
		Cap:  n,
	}))
	buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

@ -115,11 +108,7 @@ type branchPageElement struct {

// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
		Len:  int(n.ksize),
		Cap:  int(n.ksize),
	}))
	return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
}

// leafPageElement represents a node on a leaf page.
@ -132,20 +121,16 @@ type leafPageElement struct {

// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
		Len:  int(n.ksize),
		Cap:  int(n.ksize),
	}))
	i := int(n.pos)
	j := i + int(n.ksize)
	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize),
		Len:  int(n.vsize),
		Cap:  int(n.vsize),
	}))
	i := int(n.pos) + int(n.ksize)
	j := i + int(n.vsize)
	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// PageInfo represents human readable information about a page.
27 vendor/go.etcd.io/bbolt/tx.go (generated, vendored)
@ -4,7 +4,6 @@ import (
	"fmt"
	"io"
	"os"
	"reflect"
	"sort"
	"strings"
	"time"
@ -524,24 +523,18 @@ func (tx *Tx) write() error {

	// Write pages to disk in order.
	for _, p := range pages {
		size := (int(p.overflow) + 1) * tx.db.pageSize
		rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
		offset := int64(p.id) * int64(tx.db.pageSize)
		var written uintptr

		// Write out page in "max allocation" sized chunks.
		ptr := uintptr(unsafe.Pointer(p))
		for {
			// Limit our write to our max allocation size.
			sz := size
			sz := rem
			if sz > maxAllocSize-1 {
				sz = maxAllocSize - 1
			}
			buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))

			// Write chunk to disk.
			buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
				Data: ptr,
				Len:  sz,
				Cap:  sz,
			}))
			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
				return err
			}
@ -550,14 +543,14 @@ func (tx *Tx) write() error {
			tx.stats.Write++

			// Exit inner for loop if we've written all the chunks.
			size -= sz
			if size == 0 {
			rem -= sz
			if rem == 0 {
				break
			}

			// Otherwise move offset forward and move pointer to next chunk.
			offset += int64(sz)
			ptr += uintptr(sz)
			written += uintptr(sz)
		}
	}

@ -576,11 +569,7 @@ func (tx *Tx) write() error {
			continue
		}

		buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
			Data: uintptr(unsafe.Pointer(p)),
			Len:  tx.db.pageSize,
			Cap:  tx.db.pageSize,
		}))
		buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)

		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
		for i := range buf {
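tx.write now walks the page buffer by a byte offset (`written`) clamped to maxAllocSize-1 per iteration, instead of advancing a raw uintptr. The loop shape is generic; a sketch with a bytes.Buffer and an arbitrary 4 KiB cap standing in for bbolt's limit:

```go
package main

import (
	"bytes"
	"fmt"
)

// writeChunked copies src into dst in chunks no larger than maxChunk,
// tracking an offset the way the rewritten tx.write tracks `written`.
func writeChunked(dst *bytes.Buffer, src []byte, maxChunk int) {
	written := 0
	rem := len(src)
	for rem > 0 {
		sz := rem
		if sz > maxChunk {
			sz = maxChunk
		}
		dst.Write(src[written : written+sz])
		written += sz
		rem -= sz
	}
}

func main() {
	var buf bytes.Buffer
	writeChunked(&buf, make([]byte, 10000), 4096) // 4096 + 4096 + 1808
	fmt.Println(buf.Len())                        // 10000
}
```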
39 vendor/go.etcd.io/bbolt/unsafe.go (generated, vendored, new file)
@ -0,0 +1,39 @@
package bbolt

import (
	"reflect"
	"unsafe"
)

func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}

func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
	//
	// This memory is not allocated from C, but it is unmanaged by Go's
	// garbage collector and should behave similarly, and the compiler
	// should produce similar code. Note that this conversion allows a
	// subslice to begin after the base address, with an optional offset,
	// while the URL above does not cover this case and only slices from
	// index 0. However, the wiki never says that the address must be to
	// the beginning of a C allocation (or even that malloc was used at
	// all), so this is believed to be correct.
	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
}

// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
// the slice parameter. This helper should be used over other direct
// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
// from reflect.SliceHeader to a Go slice type.
func unsafeSlice(slice, data unsafe.Pointer, len int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = len
	s.Len = len
}
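The new helpers centralize bbolt's pointer arithmetic: unsafeAdd/unsafeIndex compute addresses, and unsafeSlice fills in a reflect.SliceHeader so a Go slice can view unmanaged memory. A miniature demonstration over an ordinary array (on Go 1.17+ unsafe.Slice would replace this pattern):

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// Minimal copies of the two helpers above, applied to a plain array to
// show what they do: build a []uint16 view over raw memory, no copying.
func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeSlice(slice, data unsafe.Pointer, n int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = n
	s.Len = n
}

func main() {
	raw := [4]uint16{10, 20, 30, 40}
	var view []uint16
	// view the last three elements, starting one element past the base
	data := unsafeAdd(unsafe.Pointer(&raw), unsafe.Sizeof(raw[0]))
	unsafeSlice(unsafe.Pointer(&view), data, 3)
	fmt.Println(view) // [20 30 40]
}
```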
13 vendor/golang.org/x/sys/unix/syscall_linux.go (generated, vendored)
@ -1965,10 +1965,15 @@ func isGroupMember(gid int) bool {
}

//sys	faccessat(dirfd int, path string, mode uint32) (err error)
//sys	Faccessat2(dirfd int, path string, mode uint32, flags int) (err error)

func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
	if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
		return EINVAL
	if flags == 0 {
		return faccessat(dirfd, path, mode)
	}

	if err := Faccessat2(dirfd, path, mode, flags); err != ENOSYS && err != EPERM {
		return err
	}

	// The Linux kernel faccessat system call does not take any flags.
@ -1977,8 +1982,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
	// Because people naturally expect syscall.Faccessat to act
	// like C faccessat, we do the same.

	if flags == 0 {
		return faccessat(dirfd, path, mode)
	if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
		return EINVAL
	}

	var st Stat_t
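The reordered Faccessat now short-circuits flags == 0 to plain faccessat(2), then tries the new Faccessat2 wrapper (the faccessat2(2) syscall, Linux 5.8+), and only on ENOSYS or EPERM falls back to the userspace emulation whose flag check moved below. From a caller's point of view nothing changes; flags simply work on both old and new kernels:

```go
// +build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// AT_EACCESS asks the kernel to check with effective rather than
	// real IDs; on Linux 5.8+ this goes straight to faccessat2(2),
	// older kernels transparently get the emulated path.
	err := unix.Faccessat(unix.AT_FDCWD, "/etc/passwd", unix.R_OK,
		unix.AT_EACCESS)
	fmt.Println("readable with effective IDs:", err == nil)
}
```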
20 vendor/golang.org/x/sys/unix/zerrors_linux.go (generated, vendored)
@ -265,6 +265,7 @@ const (
	CAP_AUDIT_READ      = 0x25
	CAP_AUDIT_WRITE     = 0x1d
	CAP_BLOCK_SUSPEND   = 0x24
	CAP_BPF             = 0x27
	CAP_CHOWN           = 0x0
	CAP_DAC_OVERRIDE    = 0x1
	CAP_DAC_READ_SEARCH = 0x2
@ -273,7 +274,7 @@ const (
	CAP_IPC_LOCK        = 0xe
	CAP_IPC_OWNER       = 0xf
	CAP_KILL            = 0x5
	CAP_LAST_CAP        = 0x25
	CAP_LAST_CAP        = 0x27
	CAP_LEASE           = 0x1c
	CAP_LINUX_IMMUTABLE = 0x9
	CAP_MAC_ADMIN       = 0x21
@ -283,6 +284,7 @@ const (
	CAP_NET_BIND_SERVICE = 0xa
	CAP_NET_BROADCAST    = 0xb
	CAP_NET_RAW          = 0xd
	CAP_PERFMON          = 0x26
	CAP_SETFCAP          = 0x1f
	CAP_SETGID           = 0x6
	CAP_SETPCAP          = 0x8
@ -372,6 +374,7 @@ const (
	DEVLINK_GENL_NAME                 = "devlink"
	DEVLINK_GENL_VERSION              = 0x1
	DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14
	DEVMEM_MAGIC                      = 0x454d444d
	DEVPTS_SUPER_MAGIC                = 0x1cd1
	DMA_BUF_MAGIC                     = 0x444d4142
	DT_BLK                            = 0x6
@ -475,6 +478,7 @@ const (
	ETH_P_MOBITEX = 0x15
	ETH_P_MPLS_MC = 0x8848
	ETH_P_MPLS_UC = 0x8847
	ETH_P_MRP     = 0x88e3
	ETH_P_MVRP    = 0x88f5
	ETH_P_NCSI    = 0x88f8
	ETH_P_NSH     = 0x894f
@ -602,8 +606,9 @@ const (
	FSCRYPT_POLICY_FLAGS_PAD_4         = 0x0
	FSCRYPT_POLICY_FLAGS_PAD_8         = 0x1
	FSCRYPT_POLICY_FLAGS_PAD_MASK      = 0x3
	FSCRYPT_POLICY_FLAGS_VALID         = 0xf
	FSCRYPT_POLICY_FLAGS_VALID         = 0x1f
	FSCRYPT_POLICY_FLAG_DIRECT_KEY     = 0x4
	FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 = 0x10
	FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8
	FSCRYPT_POLICY_V1                  = 0x0
	FSCRYPT_POLICY_V2                  = 0x2
@ -632,7 +637,7 @@ const (
	FS_POLICY_FLAGS_PAD_4     = 0x0
	FS_POLICY_FLAGS_PAD_8     = 0x1
	FS_POLICY_FLAGS_PAD_MASK  = 0x3
	FS_POLICY_FLAGS_VALID     = 0xf
	FS_POLICY_FLAGS_VALID     = 0x1f
	FS_VERITY_FL              = 0x100000
	FS_VERITY_HASH_ALG_SHA256 = 0x1
	FS_VERITY_HASH_ALG_SHA512 = 0x2
@ -834,6 +839,7 @@ const (
	IPPROTO_EGP      = 0x8
	IPPROTO_ENCAP    = 0x62
	IPPROTO_ESP      = 0x32
	IPPROTO_ETHERNET = 0x8f
	IPPROTO_FRAGMENT = 0x2c
	IPPROTO_GRE      = 0x2f
	IPPROTO_HOPOPTS  = 0x0
@ -847,6 +853,7 @@ const (
	IPPROTO_L2TP  = 0x73
	IPPROTO_MH    = 0x87
	IPPROTO_MPLS  = 0x89
	IPPROTO_MPTCP = 0x106
	IPPROTO_MTP   = 0x5c
	IPPROTO_NONE  = 0x3b
	IPPROTO_PIM   = 0x67
@ -1016,6 +1023,7 @@ const (
	KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2
	KEYCTL_CAPS0_PUBLIC_KEY          = 0x8
	KEYCTL_CAPS0_RESTRICT_KEYRING    = 0x40
	KEYCTL_CAPS1_NOTIFICATIONS       = 0x4
	KEYCTL_CAPS1_NS_KEYRING_NAME     = 0x1
	KEYCTL_CAPS1_NS_KEY_TAG          = 0x2
	KEYCTL_CHOWN                     = 0x4
@ -1053,6 +1061,7 @@ const (
	KEYCTL_SUPPORTS_VERIFY        = 0x8
	KEYCTL_UNLINK                 = 0x9
	KEYCTL_UPDATE                 = 0x2
	KEYCTL_WATCH_KEY              = 0x20
	KEY_REQKEY_DEFL_DEFAULT       = 0x0
	KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6
	KEY_REQKEY_DEFL_NO_CHANGE     = -0x1
@ -1096,6 +1105,8 @@ const (
	LOOP_SET_FD                     = 0x4c00
	LOOP_SET_STATUS                 = 0x4c02
	LOOP_SET_STATUS64               = 0x4c04
	LOOP_SET_STATUS_CLEARABLE_FLAGS = 0x4
	LOOP_SET_STATUS_SETTABLE_FLAGS  = 0xc
	LO_KEY_SIZE                     = 0x20
	LO_NAME_SIZE                    = 0x40
	MADV_COLD                       = 0x14
@ -1992,8 +2003,10 @@ const (
	STATX_ATTR_APPEND     = 0x20
	STATX_ATTR_AUTOMOUNT  = 0x1000
	STATX_ATTR_COMPRESSED = 0x4
	STATX_ATTR_DAX        = 0x2000
	STATX_ATTR_ENCRYPTED  = 0x800
	STATX_ATTR_IMMUTABLE  = 0x10
	STATX_ATTR_MOUNT_ROOT = 0x2000
	STATX_ATTR_NODUMP     = 0x40
	STATX_ATTR_VERITY     = 0x100000
	STATX_BASIC_STATS     = 0x7ff
@ -2002,6 +2015,7 @@ const (
	STATX_CTIME  = 0x80
	STATX_GID    = 0x10
	STATX_INO    = 0x100
	STATX_MNT_ID = 0x1000
	STATX_MODE   = 0x2
	STATX_MTIME  = 0x40
	STATX_NLINK  = 0x4
1 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go (generated, vendored)
@ -192,6 +192,7 @@ const (
	PPPIOCSRASYNCMAP         = 0x40047454
	PPPIOCSXASYNCMAP         = 0x4020744f
	PPPIOCXFERUNIT           = 0x744e
	PROT_BTI                 = 0x10
	PR_SET_PTRACER_ANY       = 0xffffffffffffffff
	PTRACE_SYSEMU            = 0x1f
	PTRACE_SYSEMU_SINGLESTEP = 0x20
15 vendor/golang.org/x/sys/unix/zsyscall_linux.go (generated, vendored)
@ -1821,6 +1821,21 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := Syscall6(SYS_FACCESSAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
	var _p0 *byte
	_p0, err = BytePtrFromString(pathname)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go (generated, vendored)
@ -433,4 +433,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go (generated, vendored)
@ -355,4 +355,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go (generated, vendored)
@ -397,4 +397,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go (generated, vendored)
@ -300,4 +300,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go (generated, vendored)
@ -418,4 +418,5 @@ const (
	SYS_CLONE3      = 4435
	SYS_OPENAT2     = 4437
	SYS_PIDFD_GETFD = 4438
	SYS_FACCESSAT2  = 4439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go (generated, vendored)
@ -348,4 +348,5 @@ const (
	SYS_CLONE3      = 5435
	SYS_OPENAT2     = 5437
	SYS_PIDFD_GETFD = 5438
	SYS_FACCESSAT2  = 5439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go (generated, vendored)
@ -348,4 +348,5 @@ const (
	SYS_CLONE3      = 5435
	SYS_OPENAT2     = 5437
	SYS_PIDFD_GETFD = 5438
	SYS_FACCESSAT2  = 5439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go (generated, vendored)
@ -418,4 +418,5 @@ const (
	SYS_CLONE3      = 4435
	SYS_OPENAT2     = 4437
	SYS_PIDFD_GETFD = 4438
	SYS_FACCESSAT2  = 4439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go (generated, vendored)
@ -397,4 +397,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go (generated, vendored)
@ -397,4 +397,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go (generated, vendored)
@ -299,4 +299,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go (generated, vendored)
@ -362,4 +362,5 @@ const (
	SYS_CLONE3      = 435
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
1 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go (generated, vendored)
@ -376,4 +376,5 @@ const (
	SYS_PIDFD_OPEN  = 434
	SYS_OPENAT2     = 437
	SYS_PIDFD_GETFD = 438
	SYS_FACCESSAT2  = 439
)
38 vendor/golang.org/x/sys/unix/ztypes_linux.go (generated, vendored)
@ -67,7 +67,9 @@ type Statx_t struct {
	Rdev_minor uint32
	Dev_major  uint32
	Dev_minor  uint32
	_          [14]uint64
	Mnt_id     uint64
	_          uint64
	_          [12]uint64
}

type Fsid struct {
@ -671,6 +673,8 @@ type InotifyEvent struct {

const SizeofInotifyEvent = 0x10

const SI_LOAD_SHIFT = 0x10

type Utsname struct {
	Sysname  [65]byte
	Nodename [65]byte
@ -1912,6 +1916,10 @@ const (
	BPF_MAP_DELETE_BATCH  = 0x1b
	BPF_LINK_CREATE       = 0x1c
	BPF_LINK_UPDATE       = 0x1d
	BPF_LINK_GET_FD_BY_ID = 0x1e
	BPF_LINK_GET_NEXT_ID  = 0x1f
	BPF_ENABLE_STATS      = 0x20
	BPF_ITER_CREATE       = 0x21
	BPF_MAP_TYPE_UNSPEC   = 0x0
	BPF_MAP_TYPE_HASH     = 0x1
	BPF_MAP_TYPE_ARRAY    = 0x2
@ -1939,6 +1947,7 @@ const (
	BPF_MAP_TYPE_SK_STORAGE     = 0x18
	BPF_MAP_TYPE_DEVMAP_HASH    = 0x19
	BPF_MAP_TYPE_STRUCT_OPS     = 0x1a
	BPF_MAP_TYPE_RINGBUF        = 0x1b
	BPF_PROG_TYPE_UNSPEC        = 0x0
	BPF_PROG_TYPE_SOCKET_FILTER = 0x1
	BPF_PROG_TYPE_KPROBE        = 0x2
@ -1997,6 +2006,18 @@ const (
	BPF_TRACE_FEXIT              = 0x19
	BPF_MODIFY_RETURN            = 0x1a
	BPF_LSM_MAC                  = 0x1b
	BPF_TRACE_ITER               = 0x1c
	BPF_CGROUP_INET4_GETPEERNAME = 0x1d
	BPF_CGROUP_INET6_GETPEERNAME = 0x1e
	BPF_CGROUP_INET4_GETSOCKNAME = 0x1f
	BPF_CGROUP_INET6_GETSOCKNAME = 0x20
	BPF_XDP_DEVMAP               = 0x21
	BPF_LINK_TYPE_UNSPEC         = 0x0
	BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1
	BPF_LINK_TYPE_TRACING        = 0x2
	BPF_LINK_TYPE_CGROUP         = 0x3
	BPF_LINK_TYPE_ITER           = 0x4
	BPF_LINK_TYPE_NETNS          = 0x5
	BPF_ANY                      = 0x0
	BPF_NOEXIST                  = 0x1
	BPF_EXIST                    = 0x2
@ -2012,6 +2033,7 @@ const (
	BPF_F_WRONLY_PROG        = 0x100
	BPF_F_CLONE              = 0x200
	BPF_F_MMAPABLE           = 0x400
	BPF_STATS_RUN_TIME       = 0x0
	BPF_STACK_BUILD_ID_EMPTY = 0x0
	BPF_STACK_BUILD_ID_VALID = 0x1
	BPF_STACK_BUILD_ID_IP    = 0x2
@ -2035,16 +2057,30 @@ const (
	BPF_F_CURRENT_CPU             = 0xffffffff
	BPF_F_CTXLEN_MASK             = 0xfffff00000000
	BPF_F_CURRENT_NETNS           = -0x1
	BPF_CSUM_LEVEL_QUERY          = 0x0
	BPF_CSUM_LEVEL_INC            = 0x1
	BPF_CSUM_LEVEL_DEC            = 0x2
	BPF_CSUM_LEVEL_RESET          = 0x3
	BPF_F_ADJ_ROOM_FIXED_GSO      = 0x1
	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4  = 0x2
	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6  = 0x4
	BPF_F_ADJ_ROOM_ENCAP_L4_GRE   = 0x8
	BPF_F_ADJ_ROOM_ENCAP_L4_UDP   = 0x10
	BPF_F_ADJ_ROOM_NO_CSUM_RESET  = 0x20
	BPF_ADJ_ROOM_ENCAP_L2_MASK    = 0xff
	BPF_ADJ_ROOM_ENCAP_L2_SHIFT   = 0x38
	BPF_F_SYSCTL_BASE_NAME        = 0x1
	BPF_SK_STORAGE_GET_F_CREATE   = 0x1
	BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1
	BPF_RB_NO_WAKEUP              = 0x1
	BPF_RB_FORCE_WAKEUP           = 0x2
	BPF_RB_AVAIL_DATA             = 0x0
	BPF_RB_RING_SIZE              = 0x1
	BPF_RB_CONS_POS               = 0x2
	BPF_RB_PROD_POS               = 0x3
	BPF_RINGBUF_BUSY_BIT          = 0x80000000
	BPF_RINGBUF_DISCARD_BIT       = 0x40000000
	BPF_RINGBUF_HDR_SZ            = 0x8
	BPF_ADJ_ROOM_NET              = 0x0
	BPF_ADJ_ROOM_MAC              = 0x1
	BPF_HDR_START_MAC             = 0x0
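The Statx_t change above carves Mnt_id (plus one reserved uint64) out of the old 14-element padding array, matching the kernel's Linux 5.8 layout; together with STATX_MNT_ID from zerrors this lets callers ask which mount an inode belongs to. A hedged sketch (on kernels older than 5.8 the field is simply left zero):

```go
// +build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// Ask for the basic stats plus the mount ID; the kernel sets the
	// STATX_MNT_ID bit in stx.Mask only when it actually filled Mnt_id.
	err := unix.Statx(unix.AT_FDCWD, "/",
		unix.AT_SYMLINK_NOFOLLOW,
		unix.STATX_BASIC_STATS|unix.STATX_MNT_ID,
		&stx)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("mount ID:", stx.Mnt_id)
}
```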
16 vendor/modules.txt (vendored)
@ -38,16 +38,16 @@ github.com/containerd/containerd/errdefs
# github.com/containers/common v0.22.0
github.com/containers/common/pkg/auth
github.com/containers/common/pkg/retry
# github.com/containers/image/v5 v5.5.2
# github.com/containers/image/v5 v5.6.0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
github.com/containers/image/v5/docker
github.com/containers/image/v5/docker/archive
github.com/containers/image/v5/docker/daemon
github.com/containers/image/v5/docker/internal/tarfile
github.com/containers/image/v5/docker/policyconfiguration
github.com/containers/image/v5/docker/reference
github.com/containers/image/v5/docker/tarfile
github.com/containers/image/v5/image
github.com/containers/image/v5/internal/iolimits
github.com/containers/image/v5/internal/pkg/keyctl
@ -200,7 +200,7 @@ github.com/hashicorp/errwrap
github.com/hashicorp/go-multierror
# github.com/hashicorp/golang-lru v0.5.1
github.com/hashicorp/golang-lru/simplelru
# github.com/imdario/mergo v0.3.9
# github.com/imdario/mergo v0.3.11
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
@ -219,8 +219,6 @@ github.com/konsorten/go-windows-terminal-sequences
github.com/kr/pretty
# github.com/kr/text v0.1.0
github.com/kr/text
# github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-isatty
# github.com/mattn/go-runewidth v0.0.9
github.com/mattn/go-runewidth
# github.com/mattn/go-shellwords v1.0.10
@ -288,7 +286,7 @@ github.com/stretchr/testify/require
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
# github.com/ulikunitz/xz v0.5.7
# github.com/ulikunitz/xz v0.5.8
github.com/ulikunitz/xz
github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
@ -297,7 +295,7 @@ github.com/ulikunitz/xz/lzma
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
# github.com/vbauerster/mpb/v5 v5.2.2
# github.com/vbauerster/mpb/v5 v5.3.0
github.com/vbauerster/mpb/v5
github.com/vbauerster/mpb/v5/cwriter
github.com/vbauerster/mpb/v5/decor
@ -310,7 +308,7 @@ github.com/xeipuuv/gojsonpointer
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.2.0
github.com/xeipuuv/gojsonschema
# go.etcd.io/bbolt v1.3.4
# go.etcd.io/bbolt v1.3.5
go.etcd.io/bbolt
# go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
go.mozilla.org/pkcs7
@ -344,7 +342,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/proxy
# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
# golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows