mirror of https://github.com/mudler/luet.git
synced 2025-09-04 16:50:50 +00:00

🔧 Update modules
vendor/github.com/containerd/containerd/archive/tar.go (generated, vendored; 5 lines changed)
@@ -393,9 +393,8 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
 		}
 	}
 
-	// There is no LChmod, so ignore mode for symlink. Also, this
-	// must happen after chown, as that can modify the file mode
-	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+	// call lchmod after lchown since lchown can modify the file mode
+	if err := lchmod(path, hdrInfo.Mode()); err != nil {
 		return err
 	}
 
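This hunk tracks containerd's switch from the handleLChmod helper (removed from tar_unix.go below) to per-platform lchmod implementations (added to tar_freebsd.go and tar_mostunix.go below). A hedged, standalone sketch of the portable variant: symlinks are skipped because Linux's fchmodat(2) does not implement AT_SYMLINK_NOFOLLOW, so the link itself cannot be chmodded portably.

package main

import (
	"fmt"
	"os"
)

// lchmod mirrors the tar_mostunix.go variant shown below: change the
// mode of path unless it is a symlink, which is skipped silently.
func lchmod(path string, mode os.FileMode) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}
	if fi.Mode()&os.ModeSymlink == 0 {
		return os.Chmod(path, mode)
	}
	return nil
}

func main() {
	if err := lchmod(os.TempDir(), 0o755); err != nil {
		fmt.Println("lchmod:", err)
	}
}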
vendor/github.com/containerd/containerd/archive/tar_freebsd.go (generated, vendored; 14 lines changed)
@@ -18,7 +18,11 @@
 
 package archive
 
-import "golang.org/x/sys/unix"
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
 
 // mknod wraps unix.Mknod. FreeBSD's unix.Mknod signature is different from
 // other Unix and Unix-like operating systems.
@@ -34,3 +38,11 @@ func lsetxattrCreate(link string, attr string, data []byte) error {
 	}
 	return err
 }
+
+func lchmod(path string, mode os.FileMode) error {
+	err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
+	if err != nil {
+		err = &os.PathError{Op: "lchmod", Path: path, Err: err}
+	}
+	return err
+}
vendor/github.com/containerd/containerd/archive/tar_mostunix.go (generated, vendored; 21 lines changed)
@@ -18,7 +18,11 @@
 
 package archive
 
-import "golang.org/x/sys/unix"
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
 
 // mknod wraps Unix.Mknod and casts dev to int
 func mknod(path string, mode uint32, dev uint64) error {
@@ -34,3 +38,18 @@ func lsetxattrCreate(link string, attr string, data []byte) error {
 	}
 	return err
 }
+
+// lchmod checks for symlink and changes the mode if not a symlink
+func lchmod(path string, mode os.FileMode) error {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return err
+	}
+
+	if fi.Mode()&os.ModeSymlink == 0 {
+		if err := os.Chmod(path, mode); err != nil {
+			return err
+		}
+	}
+	return nil
+}
vendor/github.com/containerd/containerd/archive/tar_unix.go (generated, vendored; 15 lines changed)
@@ -111,21 +111,6 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
 	return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor)))
 }
 
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
-	if hdr.Typeflag == tar.TypeLink {
-		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
-			if err := os.Chmod(path, hdrInfo.Mode()); err != nil && !os.IsNotExist(err) {
-				return err
-			}
-		}
-	} else if hdr.Typeflag != tar.TypeSymlink {
-		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func getxattr(path, attr string) ([]byte, error) {
 	b, err := sysx.LGetxattr(path, attr)
 	if err == unix.ENOTSUP || err == sysx.ENODATA {
vendor/github.com/containerd/containerd/archive/tar_windows.go (generated, vendored; 2 lines changed)
@@ -98,7 +98,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
 	return nil
 }
 
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+func lchmod(path string, mode os.FileMode) error {
 	return nil
 }
 
vendor/github.com/containerd/containerd/content/helpers.go (generated, vendored; 18 lines changed)
@@ -144,9 +144,14 @@ func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected dige
 		}
 	}
 
-	if _, err := copyWithBuffer(cw, r); err != nil {
+	copied, err := copyWithBuffer(cw, r)
+	if err != nil {
 		return errors.Wrap(err, "failed to copy")
 	}
+	if size != 0 && copied < size-ws.Offset {
+		// Short writes would return its own error, this indicates a read failure
+		return errors.Wrapf(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
+	}
 
 	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
 		if !errdefs.IsAlreadyExists(err) {
@@ -165,8 +170,15 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
 		return err
 	}
 
-	_, err = copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
-	return err
+	copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
+	if err != nil {
+		return errors.Wrap(err, "failed to copy")
+	}
+	if copied < n {
+		// Short writes would return its own error, this indicates a read failure
+		return errors.Wrap(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
+	}
+	return nil
 }
 
 // CopyReader copies to a writer from a given reader, returning
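Both hunks above apply the same fix: copyWithBuffer can return fewer bytes than expected without an error when the source reader ends early, so the count has to be checked explicitly. A minimal, self-contained sketch of the pattern (copyExactly is a hypothetical name, not containerd API):

package main

import (
	"fmt"
	"io"
	"strings"
)

// copyExactly copies from src to dst and fails if fewer than want
// bytes were read. A short *write* already surfaces its own error
// from io.Copy, so a short count here indicates a read failure.
func copyExactly(dst io.Writer, src io.Reader, want int64) error {
	copied, err := io.Copy(dst, src)
	if err != nil {
		return fmt.Errorf("failed to copy: %w", err)
	}
	if copied < want {
		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
	}
	return nil
}

func main() {
	var sb strings.Builder
	err := copyExactly(&sb, strings.NewReader("short"), 10)
	fmt.Println(err) // wraps io.ErrUnexpectedEOF
}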
vendor/github.com/containerd/stargz-snapshotter/estargz/build.go (generated, vendored; 61 lines changed)
@@ -26,7 +26,6 @@ import (
 	"archive/tar"
 	"bytes"
 	"compress/gzip"
-	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -48,6 +47,7 @@ type options struct {
 	compressionLevel       int
 	prioritizedFiles       []string
 	missedPrioritizedFiles *[]string
+	compression            Compression
 }
 
 type Option func(o *options) error
@@ -95,6 +95,15 @@ func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
 	}
 }
 
+// WithCompression specifies compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+	return func(o *options) error {
+		o.compression = compression
+		return nil
+	}
+}
+
 // Blob is an eStargz blob.
 type Blob struct {
 	io.ReadCloser
@@ -126,6 +135,9 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 			return nil, err
 		}
 	}
+	if opts.compression == nil {
+		opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+	}
 	layerFiles := newTempFiles()
 	defer func() {
 		if rErr != nil {
@@ -155,7 +167,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 		if err != nil {
 			return err
 		}
-		sw := NewWriterLevel(esgzFile, opts.compressionLevel)
+		sw := NewWriterWithCompressor(esgzFile, opts.compression)
 		sw.ChunkSize = opts.chunkSize
 		if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
 			return err
@@ -187,11 +199,12 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 	diffID := digest.Canonical.Digester()
 	pr, pw := io.Pipe()
 	go func() {
-		r, err := gzip.NewReader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+		r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
 		if err != nil {
 			pw.CloseWithError(err)
 			return
 		}
+		defer r.Close()
 		if _, err := io.Copy(diffID.Hash(), r); err != nil {
 			pw.CloseWithError(err)
 			return
@@ -213,7 +226,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 // Writers doesn't write TOC and footer to the underlying writers so they can be
 // combined into a single eStargz and tocAndFooter returned by this function can
 // be appended at the tail of that combined blob.
-func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooter io.Reader, tocDgst digest.Digest, err error) {
+func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
 	if len(ws) == 0 {
 		return nil, "", fmt.Errorf("at least one writer must be passed")
 	}
@@ -230,7 +243,7 @@ func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooter io.Read
 		}
 	}
 	var (
-		mtoc          = new(jtoc)
+		mtoc          = new(JTOC)
 		currentOffset int64
 	)
 	mtoc.Version = ws[0].toc.Version
@@ -248,40 +261,16 @@ func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooter io.Read
 		currentOffset += w.cw.n
 	}
 
-	tocJSON, err := json.MarshalIndent(mtoc, "", "\t")
+	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+	buf := new(bytes.Buffer)
+	tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
 	if err != nil {
 		return nil, "", err
 	}
-	pr, pw := io.Pipe()
-	go func() {
-		zw, _ := gzip.NewWriterLevel(pw, compressionLevel)
-		tw := tar.NewWriter(zw)
-		if err := tw.WriteHeader(&tar.Header{
-			Typeflag: tar.TypeReg,
-			Name:     TOCTarName,
-			Size:     int64(len(tocJSON)),
-		}); err != nil {
-			pw.CloseWithError(err)
-			return
-		}
-		if _, err := tw.Write(tocJSON); err != nil {
-			pw.CloseWithError(err)
-			return
-		}
-		if err := tw.Close(); err != nil {
-			pw.CloseWithError(err)
-			return
-		}
-		if err := zw.Close(); err != nil {
-			pw.CloseWithError(err)
-			return
-		}
-		pw.Close()
-	}()
-	return io.MultiReader(
-		pr,
-		bytes.NewReader(footerBytes(currentOffset)),
-	), digest.FromBytes(tocJSON), nil
+	return buf, tocDigest, nil
 }
 
 // divideEntries divides passed entries to the parts at least the number specified by the
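The new WithCompression option lets callers plug a non-gzip codec into Build; gzip at the configured level remains the default when the option is omitted. A hedged usage sketch (the Compression value is assumed to be supplied by the caller):

package estargzutil

import (
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// buildWith builds an eStargz blob from an uncompressed tar blob,
// using the caller-provided compression instead of the gzip default.
func buildWith(tarBlob *io.SectionReader, c estargz.Compression) (*estargz.Blob, error) {
	return estargz.Build(tarBlob, estargz.WithCompression(c))
}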
vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go (generated, vendored; 585 lines changed)
@@ -23,13 +23,10 @@
 package estargz
 
 import (
-	"archive/tar"
 	"bufio"
 	"bytes"
 	"compress/gzip"
 	"crypto/sha256"
-	"encoding/binary"
-	"encoding/json"
 	"fmt"
 	"hash"
 	"io"
@@ -37,7 +34,6 @@ import (
 	"os"
 	"path"
 	"sort"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -45,12 +41,13 @@ import (
 	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/archive/tar"
 )
 
 // A Reader permits random access reads from a stargz file.
 type Reader struct {
 	sr        *io.SectionReader
-	toc       *jtoc
+	toc       *JTOC
 	tocDigest digest.Digest
 
 	// m stores all non-chunk entries, keyed by name.
@@ -60,39 +57,116 @@ type Reader struct {
 	// are split up. For a file with a single chunk, it's only
 	// stored in m.
 	chunks map[string][]*TOCEntry
+
+	decompressor Decompressor
+}
+
+type openOpts struct {
+	tocOffset     int64
+	decompressors []Decompressor
+	telemetry     *Telemetry
+}
+
+// OpenOption is an option used during opening the layer
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of TOC
+func WithTOCOffset(tocOffset int64) OpenOption {
+	return func(o *openOpts) error {
+		o.tocOffset = tocOffset
+		return nil
+	}
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+	return func(o *openOpts) error {
+		o.decompressors = decompressors
+		return nil
+	}
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+	return func(o *openOpts) error {
+		o.telemetry = telemetry
+		return nil
+	}
+}
+
+// MeasureLatencyHook is a func which takes start time and records the diff
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
+// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...)
+type Telemetry struct {
+	GetFooterLatency      MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+	GetTocLatency         MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+	DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
+
 // Open opens a stargz file for reading.
+// The behaviour is configurable using options.
 //
 // Note that each entry name is normalized as the path that is relative to root.
-func Open(sr *io.SectionReader) (*Reader, error) {
-	tocOff, footerSize, err := OpenFooter(sr)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error parsing footer")
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+	var opts openOpts
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
 	}
-	tocTargz := make([]byte, sr.Size()-tocOff-footerSize)
-	if _, err := sr.ReadAt(tocTargz, tocOff); err != nil {
-		return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocTargz), err)
+
+	gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
+	decompressors := append(gzipCompressors, opts.decompressors...)
+
+	// Determine the size to fetch. Try to fetch as many bytes as possible.
+	fetchSize := maxFooterSize(sr.Size(), decompressors...)
+	if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize {
+		if maybeTocOffset > sr.Size() {
+			return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size())
+		}
+		fetchSize = sr.Size() - maybeTocOffset
 	}
-	zr, err := gzip.NewReader(bytes.NewReader(tocTargz))
-	if err != nil {
-		return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+
+	start := time.Now() // before getting layer footer
+	footer := make([]byte, fetchSize)
+	if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil {
+		return nil, fmt.Errorf("error reading footer: %v", err)
 	}
-	zr.Multistream(false)
-	tr := tar.NewReader(zr)
-	h, err := tr.Next()
-	if err != nil {
-		return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+	if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil {
+		opts.telemetry.GetFooterLatency(start)
 	}
-	if h.Name != TOCTarName {
-		return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+
+	var allErr []error
+	var found bool
+	var r *Reader
+	for _, d := range decompressors {
+		fSize := d.FooterSize()
+		fOffset := positive(int64(len(footer)) - fSize)
+		maybeTocBytes := footer[:fOffset]
+		_, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+		if err != nil {
+			allErr = append(allErr, err)
+			continue
+		}
+		if tocSize <= 0 {
+			tocSize = sr.Size() - tocOffset - fSize
+		}
+		if tocSize < int64(len(maybeTocBytes)) {
+			maybeTocBytes = maybeTocBytes[:tocSize]
+		}
+		r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
+		if err == nil {
+			found = true
+			break
+		}
+		allErr = append(allErr, err)
 	}
-	dgstr := digest.Canonical.Digester()
-	toc := new(jtoc)
-	if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
-		return nil, fmt.Errorf("error decoding TOC JSON: %v", err)
+	if !found {
+		return nil, errorutil.Aggregate(allErr)
 	}
-	r := &Reader{sr: sr, toc: toc, tocDigest: dgstr.Digest()}
 	if err := r.initFields(); err != nil {
 		return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
 	}
@@ -100,17 +174,26 @@ func Open(sr *io.SectionReader) (*Reader, error) {
 }
 
 // OpenFooter extracts and parses footer from the given blob.
+// only supports gzip-based eStargz.
 func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
 	if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
 		return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
 	}
-	// TODO: read a bigger chunk (1MB?) at once here to hopefully
-	// get the TOC + footer in one go.
 	var footer [FooterSize]byte
 	if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
 		return 0, 0, fmt.Errorf("error reading footer: %v", err)
 	}
-	return parseFooter(footer[:])
+	var allErr []error
+	for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} {
+		fSize := d.FooterSize()
+		fOffset := positive(int64(len(footer)) - fSize)
+		_, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+		if err == nil {
+			return tocOffset, fSize, err
+		}
+		allErr = append(allErr, err)
+	}
+	return 0, 0, errorutil.Aggregate(allErr)
 }
 
 // initFields populates the Reader from r.toc after decoding it from
@@ -196,12 +279,12 @@ func (r *Reader) initFields() error {
 		pdir := r.getOrCreateDir(pdirName)
 		ent.NumLink++ // at least one name(ent.Name) references this entry.
 		if ent.Type == "hardlink" {
-			if org, ok := r.m[cleanEntryName(ent.LinkName)]; ok {
-				org.NumLink++ // original entry is referenced by this ent.Name.
-				ent = org
-			} else {
-				return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+			org, err := r.getSource(ent)
+			if err != nil {
+				return err
 			}
+			org.NumLink++ // original entry is referenced by this ent.Name.
+			ent = org
 		}
 		pdir.addChild(path.Base(name), ent)
 	}
@@ -220,6 +303,20 @@ func (r *Reader) initFields() error {
 	return nil
 }
 
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+	if ent.Type == "hardlink" {
+		org, ok := r.m[cleanEntryName(ent.LinkName)]
+		if !ok {
+			return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+		}
+		ent, err = r.getSource(org)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ent, nil
+}
+
 func parentDir(p string) string {
 	dir, _ := path.Split(p)
 	return strings.TrimSuffix(dir, "/")
@@ -243,6 +340,10 @@ func (r *Reader) getOrCreateDir(d string) *TOCEntry {
 	return e
 }
 
+func (r *Reader) TOCDigest() digest.Digest {
+	return r.tocDigest
+}
+
 // VerifyTOC checks that the TOC JSON in the passed blob matches the
 // passed digests and that the TOC JSON contains digests for all chunks
 // contained in the blob. If the verification succceeds, this function
@@ -252,33 +353,73 @@ func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
 	if r.tocDigest != tocDigest {
 		return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
 	}
-	digestMap := make(map[int64]digest.Digest) // map from chunk offset to the digest
+	return r.Verifiers()
+}
+
+// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases
+// because this doesn't verify TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+	chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+	regDigestMap := make(map[int64]digest.Digest)   // map from chunk offset to the reg file digest
+	var chunkDigestMapIncomplete bool
+	var regDigestMapIncomplete bool
+	var containsChunk bool
 	for _, e := range r.toc.Entries {
-		if e.Type == "reg" || e.Type == "chunk" {
-			if e.Type == "reg" && e.Size == 0 {
+		if e.Type != "reg" && e.Type != "chunk" {
+			continue
+		}
+
+		// offset must be unique in stargz blob
+		_, dOK := chunkDigestMap[e.Offset]
+		_, rOK := regDigestMap[e.Offset]
+		if dOK || rOK {
+			return nil, fmt.Errorf("offset %d found twice", e.Offset)
+		}
+
+		if e.Type == "reg" {
+			if e.Size == 0 {
 				continue // ignores empty file
 			}
 
-			// offset must be unique in stargz blob
-			if _, ok := digestMap[e.Offset]; ok {
-				return nil, fmt.Errorf("offset %d found twice", e.Offset)
-			}
-
-			// all chunk entries must contain digest
-			if e.ChunkDigest == "" {
-				return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
-					e.Name, e.Offset)
+			// record the digest of regular file payload
+			if e.Digest != "" {
+				d, err := digest.Parse(e.Digest)
+				if err != nil {
+					return nil, errors.Wrapf(err,
+						"failed to parse regular file digest %q", e.Digest)
+				}
+				regDigestMap[e.Offset] = d
+			} else {
+				regDigestMapIncomplete = true
+			}
+		} else {
+			containsChunk = true // this layer contains "chunk" entries.
+		}
 
+		// "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+		// chunked file)
+		if e.ChunkDigest != "" {
 			d, err := digest.Parse(e.ChunkDigest)
 			if err != nil {
-				return nil, errors.Wrapf(err, "failed to parse digest %q", e.ChunkDigest)
+				return nil, errors.Wrapf(err,
+					"failed to parse chunk digest %q", e.ChunkDigest)
 			}
-			digestMap[e.Offset] = d
+			chunkDigestMap[e.Offset] = d
+		} else {
+			chunkDigestMapIncomplete = true
 		}
 	}
 
-	return &verifier{digestMap: digestMap}, nil
+	if chunkDigestMapIncomplete {
+		// Though some chunk digests are not found, if this layer doesn't contain
+		// "chunk"s and all digest of "reg" files are recorded, we can use them instead.
+		if !containsChunk && !regDigestMapIncomplete {
+			return &verifier{digestMap: regDigestMap}, nil
+		}
+		return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+	}
+
+	return &verifier{digestMap: chunkDigestMap}, nil
 }
 
 // verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
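The rewritten verifier can fall back to per-file digests when chunk digests are missing from the TOC. A hedged sketch of consuming it for a single-chunk regular file; Reader.OpenFile is part of the estargz API but does not appear in this diff, and a multi-chunk file would need one Verifier per chunk entry instead:

package estargzutil

import (
	"fmt"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
	digest "github.com/opencontainers/go-digest"
)

// verifyFile checks a single-chunk regular file against the digest
// recorded in the TOC. tocDigest is the digest the layer is expected
// to carry (e.g. from the image manifest annotation).
func verifyFile(r *estargz.Reader, tocDigest digest.Digest, name string) (bool, error) {
	ev, err := r.VerifyTOC(tocDigest) // fails if the TOC digest mismatches
	if err != nil {
		return false, err
	}
	ent, ok := r.Lookup(name)
	if !ok {
		return false, fmt.Errorf("%q not found in TOC", name)
	}
	v, err := ev.Verifier(ent)
	if err != nil {
		return false, err
	}
	f, err := r.OpenFile(name)
	if err != nil {
		return false, err
	}
	if _, err := io.Copy(v, f); err != nil {
		return false, err
	}
	return v.Verified(), nil
}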
@@ -337,7 +478,11 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
 	}
 	e, ok = r.m[path]
 	if ok && e.Type == "hardlink" {
-		e, ok = r.m[e.LinkName]
+		var err error
+		e, err = r.getSource(e)
+		if err != nil {
+			return nil, false
+		}
 	}
 	return
 }
@@ -413,17 +558,17 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
 	off -= ent.ChunkOffset
 
 	finalEnt := fr.ents[len(fr.ents)-1]
-	gzOff := ent.Offset
-	// gzBytesRemain is the number of compressed gzip bytes in this
-	// file remaining, over 1+ gzip chunks.
-	gzBytesRemain := finalEnt.NextOffset() - gzOff
+	compressedOff := ent.Offset
+	// compressedBytesRemain is the number of compressed bytes in this
+	// file remaining, over 1+ chunks.
+	compressedBytesRemain := finalEnt.NextOffset() - compressedOff
 
-	sr := io.NewSectionReader(fr.r.sr, gzOff, gzBytesRemain)
+	sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain)
 
-	const maxGZread = 2 << 20
-	var bufSize = maxGZread
-	if gzBytesRemain < maxGZread {
-		bufSize = int(gzBytesRemain)
+	const maxRead = 2 << 20
+	var bufSize = maxRead
+	if compressedBytesRemain < maxRead {
+		bufSize = int(compressedBytesRemain)
 	}
 
 	br := bufio.NewReaderSize(sr, bufSize)
@@ -431,14 +576,15 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
 		return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
 	}
 
-	gz, err := gzip.NewReader(br)
+	dr, err := fr.r.decompressor.Reader(br)
 	if err != nil {
-		return 0, fmt.Errorf("fileReader.ReadAt.gzipNewReader: %v", err)
+		return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
 	}
-	if n, err := io.CopyN(ioutil.Discard, gz, off); n != off || err != nil {
+	defer dr.Close()
+	if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil {
 		return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
 	}
-	return io.ReadFull(gz, p)
+	return io.ReadFull(dr, p)
 }
 
 // A Writer writes stargz files.
@@ -447,14 +593,14 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
 type Writer struct {
 	bw       *bufio.Writer
 	cw       *countWriter
-	toc      *jtoc
+	toc      *JTOC
 	diffHash hash.Hash // SHA-256 of uncompressed tar
 
-	closed           bool
-	gz               *gzip.Writer
-	lastUsername     map[int]string
-	lastGroupname    map[int]string
-	compressionLevel int
+	closed        bool
+	gz            io.WriteCloser
+	lastUsername  map[int]string
+	lastGroupname map[int]string
+	compressor    Compressor
 
 	// ChunkSize optionally controls the maximum number of bytes
 	// of data of a regular file that can be written in one gzip
@@ -463,16 +609,21 @@ type Writer struct {
 	ChunkSize int
 }
 
-// currentGzipWriter writes to the current w.gz field, which can
+// currentCompressionWriter writes to the current w.gz field, which can
 // change throughout writing a tar entry.
 //
 // Additionally, it updates w's SHA-256 of the uncompressed bytes
 // of the tar file.
-type currentGzipWriter struct{ w *Writer }
+type currentCompressionWriter struct{ w *Writer }
 
-func (cgw currentGzipWriter) Write(p []byte) (int, error) {
-	cgw.w.diffHash.Write(p)
-	return cgw.w.gz.Write(p)
+func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
+	ccw.w.diffHash.Write(p)
+	if ccw.w.gz == nil {
+		if err := ccw.w.condOpenGz(); err != nil {
+			return 0, err
+		}
+	}
+	return ccw.w.gz.Write(p)
 }
 
 func (w *Writer) chunkSize() int {
@@ -482,26 +633,53 @@ func (w *Writer) chunkSize() int {
 	return w.ChunkSize
 }
 
-// NewWriter returns a new stargz writer writing to w.
+// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
+// TOC JSON and footer are removed.
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+	footerSize := c.FooterSize()
+	if sr.Size() < footerSize {
+		return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+	}
+	footerOffset := sr.Size() - footerSize
+	footer := make([]byte, footerSize)
+	if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+		return nil, err
+	}
+	blobPayloadSize, _, _, err := c.ParseFooter(footer)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse footer")
+	}
+	return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
+
+// NewWriter returns a new stargz writer (gzip-based) writing to w.
 //
 // The writer must be closed to write its trailing table of contents.
 func NewWriter(w io.Writer) *Writer {
 	return NewWriterLevel(w, gzip.BestCompression)
 }
 
-// NewWriterLevel returns a new stargz writer writing to w.
+// NewWriterLevel returns a new stargz writer (gzip-based) writing to w.
 // The compression level is configurable.
 //
 // The writer must be closed to write its trailing table of contents.
 func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
+	return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
+}
+
+// NewWriterWithCompressor returns a new stargz writer writing to w.
+// The compression method is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
 	bw := bufio.NewWriter(w)
 	cw := &countWriter{w: bw}
 	return &Writer{
-		bw:               bw,
-		cw:               cw,
-		toc:              &jtoc{Version: 1},
-		diffHash:         sha256.New(),
-		compressionLevel: compressionLevel,
+		bw:         bw,
+		cw:         cw,
+		toc:        &JTOC{Version: 1},
+		diffHash:   sha256.New(),
+		compressor: c,
 	}
 }
 
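Unpack is new in this update: it parses the footer to find where the compressed payload ends, then returns the decompressed tar stream with the TOC entry and footer stripped. A short, hedged usage sketch for a gzip-based blob:

package estargzutil

import (
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// toTar strips the TOC and footer from a gzip-based eStargz blob,
// returning a reader over the remaining (decompressed) tar stream.
func toTar(blob *io.SectionReader) (io.ReadCloser, error) {
	return estargz.Unpack(blob, new(estargz.GzipDecompressor))
}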
@@ -517,42 +695,16 @@ func (w *Writer) Close() (digest.Digest, error) {
 		return "", err
 	}
 
-	// Write the TOC index.
-	tocOff := w.cw.n
-	w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
-	tw := tar.NewWriter(currentGzipWriter{w})
-	tocJSON, err := json.MarshalIndent(w.toc, "", "\t")
+	// Write the TOC index and footer.
+	tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
 	if err != nil {
 		return "", err
 	}
-	if err := tw.WriteHeader(&tar.Header{
-		Typeflag: tar.TypeReg,
-		Name:     TOCTarName,
-		Size:     int64(len(tocJSON)),
-	}); err != nil {
-		return "", err
-	}
-	if _, err := tw.Write(tocJSON); err != nil {
-		return "", err
-	}
-
-	if err := tw.Close(); err != nil {
-		return "", err
-	}
-	if err := w.closeGz(); err != nil {
-		return "", err
-	}
-
-	// And a little footer with pointer to the TOC gzip stream.
-	if _, err := w.bw.Write(footerBytes(tocOff)); err != nil {
-		return "", err
-	}
-
 	if err := w.bw.Flush(); err != nil {
 		return "", err
 	}
 
-	return digest.FromBytes(tocJSON), nil
+	return tocDigest, nil
 }
 
 func (w *Writer) closeGz() error {
@@ -584,39 +736,82 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
 	return name
 }
 
-func (w *Writer) condOpenGz() {
+func (w *Writer) condOpenGz() (err error) {
 	if w.gz == nil {
-		w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
+		w.gz, err = w.compressor.Writer(w.cw)
 	}
+	return
 }
 
 // AppendTar reads the tar or tar.gz file from r and appends
 // each of its contents to w.
 //
 // The input r can optionally be gzip compressed but the output will
-// always be gzip compressed.
+// always be compressed by the specified compressor.
 func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference of this func with AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns
+// error because w cannot overwrite the TOC JSON to the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you shoud decompress it and remove TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
 	br := bufio.NewReader(r)
-	var tr *tar.Reader
 	if isGzip(br) {
 		// NewReader can't fail if isGzip returned true.
 		zr, _ := gzip.NewReader(br)
-		tr = tar.NewReader(zr)
+		src = zr
 	} else {
-		tr = tar.NewReader(br)
+		src = io.Reader(br)
 	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
 	for {
 		h, err := tr.Next()
 		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
+					// https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+					if _, err := dst.Write(remain); err != nil {
+						return err
+					}
+				}
+			}
 			break
 		}
 		if err != nil {
 			return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
 		}
-		if h.Name == TOCTarName {
+		if cleanEntryName(h.Name) == TOCTarName {
 			// It is possible for a layer to be "stargzified" twice during the
 			// distribution lifecycle. So we reserve "TOCTarName" here to avoid
 			// duplicated entries in the resulting layer.
+			if lossless {
+				// We cannot handle this in lossless way.
+				return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+			}
			continue
 		}
 
@@ -639,11 +834,18 @@ func (w *Writer) AppendTar(r io.Reader) error {
 			ModTime3339: formatModtime(h.ModTime),
 			Xattrs:      xattrs,
 		}
-		w.condOpenGz()
-		tw := tar.NewWriter(currentGzipWriter{w})
-		if err := tw.WriteHeader(h); err != nil {
+		if err := w.condOpenGz(); err != nil {
 			return err
 		}
+		if tw != nil {
+			if err := tw.WriteHeader(h); err != nil {
+				return err
+			}
+		} else {
+			if _, err := dst.Write(tr.RawBytes()); err != nil {
+				return err
+			}
+		}
 		switch h.Typeflag {
 		case tar.TypeLink:
 			ent.Type = "hardlink"
@@ -699,10 +901,18 @@ func (w *Writer) AppendTar(r io.Reader) error {
 				ent.ChunkOffset = written
 				chunkDigest := digest.Canonical.Digester()
 
-				w.condOpenGz()
+				if err := w.condOpenGz(); err != nil {
+					return err
+				}
 
 				teeChunk := io.TeeReader(tee, chunkDigest.Hash())
-				if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
+				var out io.Writer
+				if tw != nil {
+					out = tw
+				} else {
+					out = dst
+				}
+				if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
 					return fmt.Errorf("error copying %q: %v", h.Name, err)
 				}
 				ent.ChunkDigest = chunkDigest.Digest().String()
@@ -719,11 +929,18 @@ func (w *Writer) AppendTar(r io.Reader) error {
 		if payloadDigest != nil {
 			regFileEntry.Digest = payloadDigest.Digest().String()
 		}
-		if err := tw.Flush(); err != nil {
-			return err
+		if tw != nil {
+			if err := tw.Flush(); err != nil {
+				return err
+			}
 		}
 	}
-	return nil
+	remainDest := ioutil.Discard
+	if lossless {
+		remainDest = dst // Preserve the remaining bytes in lossless mode
+	}
+	_, err := io.Copy(remainDest, src)
+	return err
 }
 
 // DiffID returns the SHA-256 of the uncompressed tar bytes.
@@ -732,83 +949,54 @@ func (w *Writer) DiffID() string {
 	return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
 }
 
-// footerBytes returns the 51 bytes footer.
-func footerBytes(tocOff int64) []byte {
-	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
-	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
-
-	// Extra header indicating the offset of TOCJSON
-	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
-	header := make([]byte, 4)
-	header[0], header[1] = 'S', 'G'
-	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
-	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
-	gz.Header.Extra = append(header, []byte(subfield)...)
-	gz.Close()
-	if buf.Len() != FooterSize {
-		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+	for _, d := range decompressors {
+		if s := d.FooterSize(); res < s && s <= blobSize {
+			res = s
+		}
 	}
-	return buf.Bytes()
+	return
 }
 
-func parseFooter(p []byte) (tocOffset int64, footerSize int64, rErr error) {
-	var allErr []error
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+	if len(tocBytes) > 0 {
+		start := time.Now()
+		toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
+		if err == nil {
+			if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+				opts.telemetry.DeserializeTocLatency(start)
+			}
+			return &Reader{
+				sr:           sr,
+				toc:          toc,
+				tocDigest:    tocDgst,
+				decompressor: d,
+			}, nil
+		}
+	}
 
-	tocOffset, err := parseEStargzFooter(p)
-	if err == nil {
-		return tocOffset, FooterSize, nil
+	start := time.Now()
+	tocBytes = make([]byte, tocSize)
+	if _, err := sr.ReadAt(tocBytes, tocOff); err != nil {
+		return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err)
 	}
-	allErr = append(allErr, err)
-
-	pad := len(p) - legacyFooterSize
-	if pad < 0 {
-		pad = 0
+	if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
+		opts.telemetry.GetTocLatency(start)
 	}
-	tocOffset, err = parseLegacyFooter(p[pad:])
-	if err == nil {
-		return tocOffset, legacyFooterSize, nil
-	}
-	return 0, 0, errorutil.Aggregate(append(allErr, err))
-}
-
-func parseEStargzFooter(p []byte) (tocOffset int64, err error) {
-	if len(p) != FooterSize {
-		return 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
-	}
-	zr, err := gzip.NewReader(bytes.NewReader(p))
+	start = time.Now()
+	toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
 	if err != nil {
-		return 0, err
+		return nil, err
 	}
-	extra := zr.Header.Extra
-	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
-	if si1 != 'S' || si2 != 'G' {
-		return 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
+	if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
+		opts.telemetry.DeserializeTocLatency(start)
 	}
-	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
-		return 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
-	}
-	if string(subfield[16:]) != "STARGZ" {
-		return 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
-	}
-	return strconv.ParseInt(string(subfield[:16]), 16, 64)
-}
-
-func parseLegacyFooter(p []byte) (tocOffset int64, err error) {
-	if len(p) != legacyFooterSize {
-		return 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
-	}
-	zr, err := gzip.NewReader(bytes.NewReader(p))
-	if err != nil {
-		return 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
-	}
-	extra := zr.Header.Extra
-	if len(extra) != 16+len("STARGZ") {
-		return 0, fmt.Errorf("legacy: invalid stargz's extra field size")
-	}
-	if string(extra[16:]) != "STARGZ" {
-		return 0, fmt.Errorf("legacy: magic string STARGZ not found")
-	}
-	return strconv.ParseInt(string(extra[:16]), 16, 64)
+	return &Reader{
+		sr:           sr,
+		toc:          toc,
+		tocDigest:    tocDgst,
+		decompressor: d,
+	}, nil
 }
 
 func formatModtime(t time.Time) string {
@@ -847,3 +1035,10 @@ func isGzip(br *bufio.Reader) bool {
 	peek, _ := br.Peek(3)
 	return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
 }
+
+func positive(n int64) int64 {
+	if n < 0 {
+		return 0
+	}
+	return n
+}
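Open now accepts options instead of a fixed gzip pipeline. A hedged sketch combining the new hooks; both options are optional, and the gzip-based decompressors are tried even when WithDecompressors is omitted:

package estargzutil

import (
	"io"
	"log"
	"time"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// openWithHooks opens an eStargz layer, logging how long the footer
// fetch took. WithDecompressors is shown for completeness only.
func openWithHooks(sr *io.SectionReader) (*estargz.Reader, error) {
	return estargz.Open(sr,
		estargz.WithDecompressors(new(estargz.GzipDecompressor)),
		estargz.WithTelemetry(&estargz.Telemetry{
			GetFooterLatency: func(start time.Time) {
				log.Printf("footer fetched in %v", time.Since(start))
			},
		}),
	)
}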
vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod (generated, vendored; 3 lines changed)
@@ -3,8 +3,9 @@ module github.com/containerd/stargz-snapshotter/estargz
 go 1.16
 
 require (
-	github.com/klauspost/compress v1.12.3
+	github.com/klauspost/compress v1.13.6
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/pkg/errors v0.9.1
+	github.com/vbatts/tar-split v0.11.2
 	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
 )
vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum (generated, vendored; 20 lines changed)
@@ -1,10 +1,22 @@
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
-github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go (generated, vendored; new file, 238 lines)
@@ -0,0 +1,238 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+type gzipCompression struct {
+	*GzipCompressor
+	*GzipDecompressor
+}
+
+func newGzipCompressionWithLevel(level int) Compression {
+	return &gzipCompression{
+		&GzipCompressor{level},
+		&GzipDecompressor{},
+	}
+}
+
+func NewGzipCompressor() *GzipCompressor {
+	return &GzipCompressor{gzip.BestCompression}
+}
+
+func NewGzipCompressorWithLevel(level int) *GzipCompressor {
+	return &GzipCompressor{level}
+}
+
+type GzipCompressor struct {
+	compressionLevel int
+}
+
+func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
+	return gzip.NewWriterLevel(w, gc.compressionLevel)
+}
+
+func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
+	tocJSON, err := json.MarshalIndent(toc, "", "\t")
+	if err != nil {
+		return "", err
+	}
+	gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
+	gw := io.Writer(gz)
+	if diffHash != nil {
+		gw = io.MultiWriter(gz, diffHash)
+	}
+	tw := tar.NewWriter(gw)
+	if err := tw.WriteHeader(&tar.Header{
+		Typeflag: tar.TypeReg,
+		Name:     TOCTarName,
+		Size:     int64(len(tocJSON)),
+	}); err != nil {
+		return "", err
+	}
+	if _, err := tw.Write(tocJSON); err != nil {
+		return "", err
+	}
+
+	if err := tw.Close(); err != nil {
+		return "", err
+	}
+	if err := gz.Close(); err != nil {
+		return "", err
+	}
+	if _, err := w.Write(gzipFooterBytes(off)); err != nil {
+		return "", err
+	}
+	return digest.FromBytes(tocJSON), nil
+}
+
+// gzipFooterBytes returns the 51 bytes footer.
+func gzipFooterBytes(tocOff int64) []byte {
+	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+	// Extra header indicating the offset of TOCJSON
+	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+	header := make([]byte, 4)
+	header[0], header[1] = 'S', 'G'
+	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+	gz.Header.Extra = append(header, []byte(subfield)...)
+	gz.Close()
+	if buf.Len() != FooterSize {
+		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+	}
+	return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != FooterSize {
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+	if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
+	}
+	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+	}
+	if string(subfield[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+	}
+	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+	return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != legacyFooterSize {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	if len(extra) != 16+len("STARGZ") {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+	}
+	if string(extra[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+	}
+	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+	return legacyFooterSize
+}
+
+func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	tr, err := decompressTOCEStargz(r)
+	if err != nil {
+		return nil, "", err
+	}
+	dgstr := digest.Canonical.Digester()
+	toc = new(JTOC)
+	if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+		return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+	}
+	if err := tr.Close(); err != nil {
+		return nil, "", err
+	}
+	return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	zr, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+	}
+	zr.Multistream(false)
+	tr := tar.NewReader(zr)
+	h, err := tr.Next()
+	if err != nil {
+		return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+	}
+	if h.Name != TOCTarName {
+		return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+	}
+	return readCloser{tr, zr.Close}, nil
+}
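The footer format implemented above is small enough to verify by hand: a 51-byte gzip stream at NoCompression whose extra field carries an 'S','G' subfield holding the TOC offset as 16 hex digits followed by the STARGZ magic. A standalone round-trip sketch of just that encoding (not estargz code, but the same byte layout):

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"fmt"
	"strconv"
)

func main() {
	const tocOff = int64(0x1234)
	buf := new(bytes.Buffer)
	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression)
	// Extra field: 'S','G', 2-byte little-endian subfield length,
	// then "%016x" TOC offset plus the "STARGZ" magic.
	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
	header := make([]byte, 4)
	header[0], header[1] = 'S', 'G'
	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield)))
	gz.Header.Extra = append(header, []byte(subfield)...)
	gz.Close()
	fmt.Println("footer size:", buf.Len()) // 51

	zr, _ := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	extra := zr.Header.Extra
	off, _ := strconv.ParseInt(string(extra[4:20]), 16, 64)
	fmt.Printf("parsed TOC offset: %#x\n", off) // 0x1234
}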
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go (generated, vendored; new file, 2009 lines)
File diff suppressed because it is too large
vendor/github.com/containerd/stargz-snapshotter/estargz/types.go (generated, vendored; 56 lines changed)
@@ -24,6 +24,8 @@ package estargz
 
 import (
 	"archive/tar"
+	"hash"
+	"io"
 	"os"
 	"path"
 	"time"
@@ -90,8 +92,8 @@
 	landmarkContents = 0xf
 )
 
-// jtoc is the JSON-serialized table of contents index of the files in the stargz file.
-type jtoc struct {
+// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
+type JTOC struct {
 	Version int         `json:"version"`
 	Entries []*TOCEntry `json:"entries"`
 }
@@ -262,3 +264,53 @@ type TOCEntryVerifier interface {
 	// contents of the specified TOCEntry.
 	Verifier(ce *TOCEntry) (digest.Verifier, error)
 }
+
+// Compression provides the compression helper to be used creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+	Compressor
+	Decompressor
+}
+
+// Compressor represents the helper mothods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
+	// Everytime a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
+	Writer(w io.Writer) (io.WriteCloser, error)
+
+	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob
+	// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob when it's parsed.
+	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper mothods to be used for parsing eStargz.
+type Decompressor interface {
+	// Reader returns ReadCloser to be used for decompressing file payload.
+	Reader(r io.Reader) (io.ReadCloser, error)
+
+	// FooterSize returns the size of the footer of this blob.
+	FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+	// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
+	// the top until the TOC JSON).
+	//
+	// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
+	// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
+	// of the underlying blob that has the range specified by ParseFooter method.
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob. This must match to the value returned from
+	// Compressor.WriteTOCAndFooter that is used when creating this blob.
+	ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
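Any codec satisfying this pair of interfaces can produce and parse eStargz blobs. A hedged skeleton showing only the required method set; every body below is a placeholder, not a working codec:

package estargzutil

import (
	"hash"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
	digest "github.com/opencontainers/go-digest"
)

// passthrough is a hypothetical, do-nothing Compression used only to
// show the method set; a real implementation (e.g. zstd) must emit a
// parsable footer and a TOC whose digest round-trips through ParseTOC.
type passthrough struct{}

func (passthrough) Writer(w io.Writer) (io.WriteCloser, error) {
	return nopWriteCloser{w}, nil
}

func (passthrough) WriteTOCAndFooter(w io.Writer, off int64, toc *estargz.JTOC, diffHash hash.Hash) (digest.Digest, error) {
	// A real codec serializes toc at off, then writes a fixed-size footer.
	return "", nil
}

func (passthrough) Reader(r io.Reader) (io.ReadCloser, error) {
	return io.NopCloser(r), nil
}

func (passthrough) FooterSize() int64 { return 0 }

func (passthrough) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
	return 0, 0, 0, nil // placeholder
}

func (passthrough) ParseTOC(r io.Reader) (toc *estargz.JTOC, tocDgst digest.Digest, err error) {
	return nil, "", nil // placeholder
}

type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

// Compile-time check that the skeleton satisfies the interface.
var _ estargz.Compression = passthrough{}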