Mirror of https://github.com/containers/skopeo.git (synced 2025-09-07 17:54:09 +00:00)
Bump github.com/containers/storage from 1.36.0 to 1.37.0
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.36.0 to 1.37.0.

- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/main/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.36.0...v1.37.0)

---
updated-dependencies:
- dependency-name: github.com/containers/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
131 changes: vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go (generated, vendored)
@@ -23,7 +23,6 @@
 package estargz
 
 import (
-	"archive/tar"
 	"bufio"
 	"bytes"
 	"compress/gzip"
@@ -42,6 +41,7 @@ import (
 	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/archive/tar"
 )
 
 // A Reader permits random access reads from a stargz file.
@@ -95,10 +95,10 @@ func WithTelemetry(telemetry *Telemetry) OpenOption {
 	}
 }
 
-// A func which takes start time and records the diff
+// MeasureLatencyHook is a func which takes start time and records the diff
 type MeasureLatencyHook func(time.Time)
 
-// A struct which defines telemetry hooks. By implementing these hooks you should be able to record
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
 // the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...)
 type Telemetry struct {
 	GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
@@ -146,7 +146,7 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
 		fSize := d.FooterSize()
 		fOffset := positive(int64(len(footer)) - fSize)
 		maybeTocBytes := footer[:fOffset]
-		tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+		_, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
 		if err != nil {
 			allErr = append(allErr, err)
 			continue
@@ -187,7 +187,7 @@ func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr e
 	for _, d := range []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)} {
 		fSize := d.FooterSize()
 		fOffset := positive(int64(len(footer)) - fSize)
-		tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+		_, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
 		if err == nil {
 			return tocOffset, fSize, err
 		}
@@ -326,6 +326,10 @@ func (r *Reader) getOrCreateDir(d string) *TOCEntry {
 	return e
 }
 
+func (r *Reader) TOCDigest() digest.Digest {
+	return r.tocDigest
+}
+
 // VerifyTOC checks that the TOC JSON in the passed blob matches the
 // passed digests and that the TOC JSON contains digests for all chunks
 // contained in the blob. If the verification succceeds, this function
@@ -335,7 +339,12 @@ func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
 	if r.tocDigest != tocDigest {
 		return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
 	}
+	return r.Verifiers()
+}
+
+// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases
+// because this doesn't verify TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
 	chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
 	regDigestMap := make(map[int64]digest.Digest)   // map from chunk offset to the reg file digest
 	var chunkDigestMapIncomplete bool
@@ -591,6 +600,11 @@ type currentCompressionWriter struct{ w *Writer }
 
 func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
 	ccw.w.diffHash.Write(p)
+	if ccw.w.gz == nil {
+		if err := ccw.w.condOpenGz(); err != nil {
+			return 0, err
+		}
+	}
 	return ccw.w.gz.Write(p)
 }
 
@@ -601,6 +615,25 @@ func (w *Writer) chunkSize() int {
 	return w.ChunkSize
 }
 
+// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
+// TOC JSON and footer are removed.
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+	footerSize := c.FooterSize()
+	if sr.Size() < footerSize {
+		return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+	}
+	footerOffset := sr.Size() - footerSize
+	footer := make([]byte, footerSize)
+	if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+		return nil, err
+	}
+	blobPayloadSize, _, _, err := c.ParseFooter(footer)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse footer")
+	}
+	return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
+
 // NewWriter returns a new stargz writer (gzip-based) writing to w.
 //
 // The writer must be closed to write its trailing table of contents.
@@ -616,7 +649,7 @@ func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
 	return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
 }
 
-// NewWriterLevel returns a new stargz writer writing to w.
+// NewWriterWithCompressor returns a new stargz writer writing to w.
 // The compression method is configurable.
 //
 // The writer must be closed to write its trailing table of contents.
@@ -696,29 +729,71 @@ func (w *Writer) condOpenGz() (err error) {
 // each of its contents to w.
 //
 // The input r can optionally be gzip compressed but the output will
-// always be gzip compressed.
+// always be compressed by the specified compressor.
 func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference of this func with AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns
+// error because w cannot overwrite the TOC JSON to the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you shoud decompress it and remove TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
 	br := bufio.NewReader(r)
-	var tr *tar.Reader
 	if isGzip(br) {
 		// NewReader can't fail if isGzip returned true.
 		zr, _ := gzip.NewReader(br)
-		tr = tar.NewReader(zr)
+		src = zr
 	} else {
-		tr = tar.NewReader(br)
+		src = io.Reader(br)
 	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
 	for {
 		h, err := tr.Next()
 		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
+					// https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+					if _, err := dst.Write(remain); err != nil {
+						return err
+					}
+				}
+			}
 			break
 		}
 		if err != nil {
			return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
 		}
-		if h.Name == TOCTarName {
+		if cleanEntryName(h.Name) == TOCTarName {
 			// It is possible for a layer to be "stargzified" twice during the
 			// distribution lifecycle. So we reserve "TOCTarName" here to avoid
 			// duplicated entries in the resulting layer.
+			if lossless {
+				// We cannot handle this in lossless way.
+				return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+			}
 			continue
 		}
@@ -744,9 +819,14 @@ func (w *Writer) AppendTar(r io.Reader) error {
 		if err := w.condOpenGz(); err != nil {
 			return err
 		}
-		tw := tar.NewWriter(currentCompressionWriter{w})
-		if err := tw.WriteHeader(h); err != nil {
-			return err
+		if tw != nil {
+			if err := tw.WriteHeader(h); err != nil {
+				return err
+			}
+		} else {
+			if _, err := dst.Write(tr.RawBytes()); err != nil {
+				return err
+			}
 		}
 		switch h.Typeflag {
 		case tar.TypeLink:
@@ -808,7 +888,13 @@ func (w *Writer) AppendTar(r io.Reader) error {
 			}
 
 			teeChunk := io.TeeReader(tee, chunkDigest.Hash())
-			if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
+			var out io.Writer
+			if tw != nil {
+				out = tw
+			} else {
+				out = dst
+			}
+			if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
 				return fmt.Errorf("error copying %q: %v", h.Name, err)
 			}
 			ent.ChunkDigest = chunkDigest.Digest().String()
@@ -825,11 +911,18 @@ func (w *Writer) AppendTar(r io.Reader) error {
 		if payloadDigest != nil {
 			regFileEntry.Digest = payloadDigest.Digest().String()
 		}
-		if err := tw.Flush(); err != nil {
-			return err
+		if tw != nil {
+			if err := tw.Flush(); err != nil {
+				return err
+			}
 		}
 	}
-	return nil
+	remainDest := ioutil.Discard
+	if lossless {
+		remainDest = dst // Preserve the remaining bytes in lossless mode
+	}
+	_, err := io.Copy(remainDest, src)
+	return err
 }
 
 // DiffID returns the SHA-256 of the uncompressed tar bytes.
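Note: the hunks above introduce a lossless append path (`AppendTarLossLess`) and a matching `Unpack` helper. The following is a minimal sketch, not part of this commit, of how the two might round-trip a tar blob; the `layer.tar` input file and the use of the default gzip-based writer are illustrative assumptions.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	// Hypothetical input: a plain tar layer on disk.
	tarBytes, err := os.ReadFile("layer.tar")
	if err != nil {
		panic(err)
	}

	// Append the tar stream without modifying its header bytes.
	var blob bytes.Buffer
	w := estargz.NewWriter(&blob) // gzip-based writer
	if err := w.AppendTarLossLess(bytes.NewReader(tarBytes)); err != nil {
		panic(err) // fails if the input already carries TOC JSON
	}
	tocDigest, err := w.Close() // writes the trailing TOC and footer
	if err != nil {
		panic(err)
	}
	fmt.Println("TOC digest:", tocDigest)

	// Unpack strips the TOC JSON and footer, yielding the tar payload.
	sr := io.NewSectionReader(bytes.NewReader(blob.Bytes()), 0, int64(blob.Len()))
	rc, err := estargz.Unpack(sr, new(estargz.GzipDecompressor))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	restored, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println("restored == original:", bytes.Equal(restored, tarBytes))
}
```

This mirrors what the updated tests below assert: in lossless mode the decompressed blob's digest must equal the original tar's digest.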
3 changes: vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod (generated, vendored)
@@ -3,8 +3,9 @@ module github.com/containerd/stargz-snapshotter/estargz
 go 1.16
 
 require (
-	github.com/klauspost/compress v1.13.5
+	github.com/klauspost/compress v1.13.6
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/pkg/errors v0.9.1
+	github.com/vbatts/tar-split v0.11.2
 	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
 )
18 changes: vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum (generated, vendored)
@@ -1,8 +1,22 @@
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
-github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
30 changes: vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go (generated, vendored)
@@ -124,31 +124,31 @@ func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Dig
 	return parseTOCEStargz(r)
 }
 
-func (gz *GzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
 	if len(p) != FooterSize {
-		return 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
 	}
 	zr, err := gzip.NewReader(bytes.NewReader(p))
 	if err != nil {
-		return 0, 0, err
+		return 0, 0, 0, err
 	}
 	defer zr.Close()
 	extra := zr.Header.Extra
 	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
 	if si1 != 'S' || si2 != 'G' {
-		return 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
 	}
 	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
-		return 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
 	}
 	if string(subfield[16:]) != "STARGZ" {
-		return 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
 	}
 	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
 	if err != nil {
-		return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
 	}
-	return tocOffset, 0, nil
+	return tocOffset, tocOffset, 0, nil
 }
 
 func (gz *GzipDecompressor) FooterSize() int64 {
@@ -165,27 +165,27 @@ func (gz *legacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst dige
 	return parseTOCEStargz(r)
 }
 
-func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
+func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
 	if len(p) != legacyFooterSize {
-		return 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
 	}
 	zr, err := gzip.NewReader(bytes.NewReader(p))
 	if err != nil {
-		return 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
 	}
 	defer zr.Close()
 	extra := zr.Header.Extra
 	if len(extra) != 16+len("STARGZ") {
-		return 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
 	}
 	if string(extra[16:]) != "STARGZ" {
-		return 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
 	}
 	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
 	if err != nil {
-		return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
 	}
-	return tocOffset, 0, nil
+	return tocOffset, tocOffset, 0, nil
 }
 
 func (gz *legacyGzipDecompressor) FooterSize() int64 {
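Note: with this change every footer parser also reports the compressed payload size. For eStargz gzip footers the payload ends exactly where the TOC begins, which is why both parsers above now return `tocOffset, tocOffset, 0, nil`. A small sketch, assuming a gzip eStargz blob wrapped in an `*io.SectionReader`, of what a caller of the new four-value signature looks like (the `readFooter` helper and its package are hypothetical):

```go
package inspect

import (
	"fmt"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// readFooter prints where the blob payload ends and where the TOC lives,
// using the four-value ParseFooter introduced by this bump.
func readFooter(sr *io.SectionReader) error {
	d := new(estargz.GzipDecompressor)
	fSize := d.FooterSize()
	footer := make([]byte, fSize)
	if _, err := sr.ReadAt(footer, sr.Size()-fSize); err != nil {
		return err
	}
	blobPayloadSize, tocOffset, tocSize, err := d.ParseFooter(footer)
	if err != nil {
		return err
	}
	if tocSize <= 0 {
		// Interface default: the TOC runs from tocOffset up to the footer.
		tocSize = sr.Size() - tocOffset - fSize
	}
	fmt.Printf("payload: %d bytes, TOC at %d (%d bytes)\n", blobPayloadSize, tocOffset, tocSize)
	return nil
}
```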
494 changes: vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go (generated, vendored)
@@ -148,93 +148,96 @@ func testBuild(t *testing.T, controllers ...TestingController) {
 		srcCompression := srcCompression
 		for _, cl := range controllers {
 			cl := cl
-			for _, prefix := range allowedPrefix {
-				prefix := prefix
-				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q-src=%d", cl, prefix, srcCompression), func(t *testing.T) {
-					tarBlob := buildTarStatic(t, tt.in, prefix)
-					// Test divideEntries()
-					entries, err := sortEntries(tarBlob, nil, nil) // identical order
-					if err != nil {
-						t.Fatalf("faield to parse tar: %v", err)
-					}
-					var merged []*entry
-					for _, part := range divideEntries(entries, 4) {
-						merged = append(merged, part...)
-					}
-					if !reflect.DeepEqual(entries, merged) {
-						for _, e := range entries {
-							t.Logf("Original: %v", e.header)
+			for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+				srcTarFormat := srcTarFormat
+				for _, prefix := range allowedPrefix {
+					prefix := prefix
+					t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) {
+						tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+						// Test divideEntries()
+						entries, err := sortEntries(tarBlob, nil, nil) // identical order
+						if err != nil {
+							t.Fatalf("failed to parse tar: %v", err)
 						}
-						for _, e := range merged {
-							t.Logf("Merged: %v", e.header)
+						var merged []*entry
+						for _, part := range divideEntries(entries, 4) {
+							merged = append(merged, part...)
 						}
+						if !reflect.DeepEqual(entries, merged) {
+							for _, e := range entries {
+								t.Logf("Original: %v", e.header)
+							}
+							for _, e := range merged {
+								t.Logf("Merged: %v", e.header)
+							}
+							t.Errorf("divided entries couldn't be merged")
+							return
+						}
-						t.Errorf("divided entries couldn't be merged")
-						return
-					}
 
-					// Prepare sample data
-					wantBuf := new(bytes.Buffer)
-					sw := NewWriterWithCompressor(wantBuf, cl)
-					sw.ChunkSize = tt.chunkSize
-					if err := sw.AppendTar(tarBlob); err != nil {
-						t.Fatalf("faield to append tar to want stargz: %v", err)
-					}
-					if _, err := sw.Close(); err != nil {
-						t.Fatalf("faield to prepare want stargz: %v", err)
-					}
-					wantData := wantBuf.Bytes()
-					want, err := Open(io.NewSectionReader(
-						bytes.NewReader(wantData), 0, int64(len(wantData))),
-						WithDecompressors(cl),
-					)
-					if err != nil {
-						t.Fatalf("failed to parse the want stargz: %v", err)
-					}
+						// Prepare sample data
+						wantBuf := new(bytes.Buffer)
+						sw := NewWriterWithCompressor(wantBuf, cl)
+						sw.ChunkSize = tt.chunkSize
+						if err := sw.AppendTar(tarBlob); err != nil {
+							t.Fatalf("failed to append tar to want stargz: %v", err)
+						}
+						if _, err := sw.Close(); err != nil {
+							t.Fatalf("failed to prepare want stargz: %v", err)
+						}
+						wantData := wantBuf.Bytes()
+						want, err := Open(io.NewSectionReader(
+							bytes.NewReader(wantData), 0, int64(len(wantData))),
+							WithDecompressors(cl),
+						)
+						if err != nil {
+							t.Fatalf("failed to parse the want stargz: %v", err)
+						}
 
-					// Prepare testing data
-					rc, err := Build(compressBlob(t, tarBlob, srcCompression),
-						WithChunkSize(tt.chunkSize), WithCompression(cl))
-					if err != nil {
-						t.Fatalf("faield to build stargz: %v", err)
-					}
-					defer rc.Close()
-					gotBuf := new(bytes.Buffer)
-					if _, err := io.Copy(gotBuf, rc); err != nil {
-						t.Fatalf("failed to copy built stargz blob: %v", err)
-					}
-					gotData := gotBuf.Bytes()
-					got, err := Open(io.NewSectionReader(
-						bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
-						WithDecompressors(cl),
-					)
-					if err != nil {
-						t.Fatalf("failed to parse the got stargz: %v", err)
-					}
+						// Prepare testing data
+						rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+							WithChunkSize(tt.chunkSize), WithCompression(cl))
+						if err != nil {
+							t.Fatalf("failed to build stargz: %v", err)
+						}
+						defer rc.Close()
+						gotBuf := new(bytes.Buffer)
+						if _, err := io.Copy(gotBuf, rc); err != nil {
+							t.Fatalf("failed to copy built stargz blob: %v", err)
+						}
+						gotData := gotBuf.Bytes()
+						got, err := Open(io.NewSectionReader(
+							bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+							WithDecompressors(cl),
+						)
+						if err != nil {
+							t.Fatalf("failed to parse the got stargz: %v", err)
+						}
 
-					// Check DiffID is properly calculated
-					rc.Close()
-					diffID := rc.DiffID()
-					wantDiffID := cl.DiffIDOf(t, gotData)
-					if diffID.String() != wantDiffID {
-						t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
-					}
+						// Check DiffID is properly calculated
+						rc.Close()
+						diffID := rc.DiffID()
+						wantDiffID := cl.DiffIDOf(t, gotData)
+						if diffID.String() != wantDiffID {
+							t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+						}
 
-					// Compare as stargz
-					if !isSameVersion(t, cl, wantData, gotData) {
-						t.Errorf("built stargz hasn't same json")
-						return
-					}
-					if !isSameEntries(t, want, got) {
-						t.Errorf("built stargz isn't same as the original")
-						return
-					}
+						// Compare as stargz
+						if !isSameVersion(t, cl, wantData, gotData) {
+							t.Errorf("built stargz hasn't same json")
+							return
+						}
+						if !isSameEntries(t, want, got) {
+							t.Errorf("built stargz isn't same as the original")
+							return
+						}
 
-					// Compare as tar.gz
-					if !isSameTarGz(t, cl, wantData, gotData) {
-						t.Errorf("built stargz isn't same tar.gz")
-						return
-					}
-				})
+						// Compare as tar.gz
+						if !isSameTarGz(t, cl, wantData, gotData) {
+							t.Errorf("built stargz isn't same tar.gz")
+							return
+						}
+					})
+				}
 			}
 		}
 	}
 }
@@ -526,7 +529,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 			checks: []check{
 				checkStargzTOC,
 				checkVerifyTOC,
-				checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
 					dir("test2/"), // modified
 				), allowedPrefix[0])),
 			},
@@ -544,7 +547,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 			checks: []check{
 				checkStargzTOC,
 				checkVerifyTOC,
-				checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
 					file("baz.txt", ""),
 					file("foo.txt", "M"), // modified
 					dir("test/"),
@@ -567,7 +570,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 			checks: []check{
 				checkStargzTOC,
 				checkVerifyTOC,
-				checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
 					file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
 					file("foo.txt", "a"),
 					dir("test/"),
@@ -593,7 +596,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 			checks: []check{
 				checkStargzTOC,
 				checkVerifyTOC,
-				checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
+				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
 					file("baz.txt", "bazbazbazbazbazbazbaz"),
 					file("foo.txt", "a"),
 					symlink("barlink", "test/bar.txt"),
@@ -615,30 +618,33 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 		cl := cl
 		for _, prefix := range allowedPrefix {
 			prefix := prefix
-			t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
-				// Get original tar file and chunk digests
-				dgstMap := make(map[string]digest.Digest)
-				tarBlob := buildTarStatic(t, tt.tarInit(t, dgstMap), prefix)
+			for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+				srcTarFormat := srcTarFormat
+				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) {
+					// Get original tar file and chunk digests
+					dgstMap := make(map[string]digest.Digest)
+					tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
 
-				rc, err := Build(compressBlob(t, tarBlob, srcCompression),
-					WithChunkSize(chunkSize), WithCompression(cl))
-				if err != nil {
-					t.Fatalf("failed to convert stargz: %v", err)
-				}
-				tocDigest := rc.TOCDigest()
-				defer rc.Close()
-				buf := new(bytes.Buffer)
-				if _, err := io.Copy(buf, rc); err != nil {
-					t.Fatalf("failed to copy built stargz blob: %v", err)
-				}
-				newStargz := buf.Bytes()
-				// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
-				dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+					rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+						WithChunkSize(chunkSize), WithCompression(cl))
+					if err != nil {
+						t.Fatalf("failed to convert stargz: %v", err)
+					}
+					tocDigest := rc.TOCDigest()
+					defer rc.Close()
+					buf := new(bytes.Buffer)
+					if _, err := io.Copy(buf, rc); err != nil {
+						t.Fatalf("failed to copy built stargz blob: %v", err)
+					}
+					newStargz := buf.Bytes()
+					// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
+					dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
 
-				for _, check := range tt.checks {
-					check(t, newStargz, tocDigest, dgstMap, cl)
-				}
-			})
+					for _, check := range tt.checks {
+						check(t, newStargz, tocDigest, dgstMap, cl)
+					}
+				})
+			}
 		}
 	}
 }
@@ -1058,7 +1064,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
 	if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
 		return nil, 0, errors.Wrap(err, "error reading footer")
 	}
-	tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
+	_, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
 	if err != nil {
 		return nil, 0, errors.Wrapf(err, "failed to parse footer")
 	}
@@ -1085,11 +1091,15 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
 		in        []tarEntry
 		want      []stargzCheck
 		wantNumGz int // expected number of streams
+
+		wantNumGzLossLess  int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
+		wantFailOnLossLess bool
 	}{
 		{
-			name:      "empty",
-			in:        tarOf(),
-			wantNumGz: 2, // TOC + footer
+			name:              "empty",
+			in:                tarOf(),
+			wantNumGz:         2, // empty tar + TOC + footer
+			wantNumGzLossLess: 3, // empty tar + TOC + footer
 			want: checks(
 				numTOCEntries(0),
 			),
@@ -1224,26 +1234,29 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
 		{
 			name: "block_char_fifo",
 			in: tarOf(
-				tarEntryFunc(func(w *tar.Writer, prefix string) error {
+				tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 					return w.WriteHeader(&tar.Header{
 						Name:     prefix + "b",
 						Typeflag: tar.TypeBlock,
 						Devmajor: 123,
 						Devminor: 456,
+						Format:   format,
 					})
 				}),
-				tarEntryFunc(func(w *tar.Writer, prefix string) error {
+				tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 					return w.WriteHeader(&tar.Header{
 						Name:     prefix + "c",
 						Typeflag: tar.TypeChar,
 						Devmajor: 111,
 						Devminor: 222,
+						Format:   format,
 					})
 				}),
-				tarEntryFunc(func(w *tar.Writer, prefix string) error {
+				tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 					return w.WriteHeader(&tar.Header{
 						Name:     prefix + "f",
 						Typeflag: tar.TypeFifo,
+						Format:   format,
 					})
 				}),
 			),
@@ -1278,6 +1291,29 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
 				hasMode("foo3/bar5", os.FileMode(0755)),
 			),
 		},
+		{
+			name: "lossy",
+			in: tarOf(
+				dir("bar/", sampleOwner),
+				dir("foo/", sampleOwner),
+				file("foo/bar.txt", content, sampleOwner),
+				file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
+			),
+			wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer
+			want: checks(
+				numTOCEntries(3),
+				hasDir("bar/"),
+				hasDir("foo/"),
+				hasFileLen("foo/bar.txt", len(content)),
+				entryHasChildren("", "bar", "foo"),
+				entryHasChildren("foo", "bar.txt"),
+				hasChunkEntries("foo/bar.txt", 1),
+				hasEntryOwner("bar/", sampleOwner),
+				hasEntryOwner("foo/", sampleOwner),
+				hasEntryOwner("foo/bar.txt", sampleOwner),
+			),
+			wantFailOnLossLess: true,
+		},
 	}
 
 	for _, tt := range tests {
@@ -1285,47 +1321,90 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
 		cl := cl
 		for _, prefix := range allowedPrefix {
 			prefix := prefix
-			t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
-				tr, cancel := buildTar(t, tt.in, prefix)
-				defer cancel()
-				var stargzBuf bytes.Buffer
-				w := NewWriterWithCompressor(&stargzBuf, cl)
-				w.ChunkSize = tt.chunkSize
-				if err := w.AppendTar(tr); err != nil {
-					t.Fatalf("Append: %v", err)
-				}
-				if _, err := w.Close(); err != nil {
-					t.Fatalf("Writer.Close: %v", err)
-				}
-				b := stargzBuf.Bytes()
+			for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+				srcTarFormat := srcTarFormat
+				for _, lossless := range []bool{true, false} {
+					t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
+						var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+						origTarDgstr := digest.Canonical.Digester()
+						tr = io.TeeReader(tr, origTarDgstr.Hash())
+						var stargzBuf bytes.Buffer
+						w := NewWriterWithCompressor(&stargzBuf, cl)
+						w.ChunkSize = tt.chunkSize
+						if lossless {
+							err := w.AppendTarLossLess(tr)
+							if tt.wantFailOnLossLess {
+								if err != nil {
+									return // expected to fail
+								}
+								t.Fatalf("Append wanted to fail on lossless")
+							}
+							if err != nil {
+								t.Fatalf("Append(lossless): %v", err)
+							}
+						} else {
+							if err := w.AppendTar(tr); err != nil {
+								t.Fatalf("Append: %v", err)
+							}
+						}
+						if _, err := w.Close(); err != nil {
+							t.Fatalf("Writer.Close: %v", err)
+						}
+						b := stargzBuf.Bytes()
 
-				diffID := w.DiffID()
-				wantDiffID := cl.DiffIDOf(t, b)
-				if diffID != wantDiffID {
-					t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
-				}
+						if lossless {
+							// Check if the result blob reserves original tar metadata
+							rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
+							if err != nil {
+								t.Errorf("failed to decompress blob: %v", err)
+								return
+							}
+							defer rc.Close()
+							resultDgstr := digest.Canonical.Digester()
+							if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+								t.Errorf("failed to read result decompressed blob: %v", err)
+								return
+							}
+							if resultDgstr.Digest() != origTarDgstr.Digest() {
+								t.Errorf("lossy compression occurred: digest=%v; want %v",
+									resultDgstr.Digest(), origTarDgstr.Digest())
+								return
+							}
+						}
 
-				got := cl.CountStreams(t, b)
-				if got != tt.wantNumGz {
-					t.Errorf("number of streams = %d; want %d", got, tt.wantNumGz)
-				}
+						diffID := w.DiffID()
+						wantDiffID := cl.DiffIDOf(t, b)
+						if diffID != wantDiffID {
+							t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+						}
 
-				telemetry, checkCalled := newCalledTelemetry()
-				r, err := Open(
-					io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
-					WithDecompressors(cl),
-					WithTelemetry(telemetry),
-				)
-				if err != nil {
-					t.Fatalf("stargz.Open: %v", err)
+						got := cl.CountStreams(t, b)
+						wantNumGz := tt.wantNumGz
+						if lossless && tt.wantNumGzLossLess > 0 {
+							wantNumGz = tt.wantNumGzLossLess
+						}
+						if got != wantNumGz {
+							t.Errorf("number of streams = %d; want %d", got, wantNumGz)
+						}
+
+						telemetry, checkCalled := newCalledTelemetry()
+						r, err := Open(
+							io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
+							WithDecompressors(cl),
+							WithTelemetry(telemetry),
+						)
+						if err != nil {
+							t.Fatalf("stargz.Open: %v", err)
+						}
-				if err := checkCalled(); err != nil {
-					t.Errorf("telemetry failure: %v", err)
-				}
-				for _, want := range tt.want {
-					want.check(t, r)
-				}
-			})
-		}
+						if err := checkCalled(); err != nil {
+							t.Errorf("telemetry failure: %v", err)
+						}
+						for _, want := range tt.want {
+							want.check(t, r)
+						}
+					})
+				}
+			}
 		}
 	}
 }
@@ -1655,49 +1734,41 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
 func tarOf(s ...tarEntry) []tarEntry { return s }
 
 type tarEntry interface {
-	appendTar(tw *tar.Writer, prefix string) error
+	appendTar(tw *tar.Writer, prefix string, format tar.Format) error
 }
 
-type tarEntryFunc func(*tar.Writer, string) error
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
 
-func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string) error { return f(tw, prefix) }
-
-func buildTar(t *testing.T, ents []tarEntry, prefix string) (r io.Reader, cancel func()) {
-	pr, pw := io.Pipe()
-	go func() {
-		tw := tar.NewWriter(pw)
-		for _, ent := range ents {
-			if err := ent.appendTar(tw, prefix); err != nil {
-				t.Errorf("building input tar: %v", err)
-				pw.Close()
-				return
-			}
-		}
-		if err := tw.Close(); err != nil {
-			t.Errorf("closing write of input tar: %v", err)
-		}
-		pw.Close()
-	}()
-	return pr, func() { go pr.Close(); go pw.Close() }
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+	return f(tw, prefix, format)
 }
 
-func buildTarStatic(t *testing.T, ents []tarEntry, prefix string) *io.SectionReader {
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+	format := tar.FormatUnknown
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case tar.Format:
+			format = v
+		default:
+			panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+		}
+	}
 	buf := new(bytes.Buffer)
 	tw := tar.NewWriter(buf)
 	for _, ent := range ents {
-		if err := ent.appendTar(tw, prefix); err != nil {
+		if err := ent.appendTar(tw, prefix, format); err != nil {
 			t.Fatalf("building input tar: %v", err)
 		}
 	}
 	if err := tw.Close(); err != nil {
 		t.Errorf("closing write of input tar: %v", err)
 	}
-	data := buf.Bytes()
+	data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works
 	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
 }
 
 func dir(name string, opts ...interface{}) tarEntry {
-	return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
 		var o owner
 		mode := os.FileMode(0755)
 		for _, opt := range opts {
@@ -1723,6 +1794,7 @@ func dir(name string, opts ...interface{}) tarEntry {
 			Mode:     tm,
 			Uid:      o.uid,
 			Gid:      o.gid,
+			Format:   format,
 		})
 	})
 }
@@ -1737,7 +1809,7 @@ type owner struct {
 }
 
 func file(name, contents string, opts ...interface{}) tarEntry {
-	return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
 		var xattrs xAttr
 		var o owner
 		mode := os.FileMode(0644)
@@ -1760,6 +1832,9 @@ func file(name, contents string, opts ...interface{}) tarEntry {
 		if err != nil {
 			return err
 		}
+		if len(xattrs) > 0 {
+			format = tar.FormatPAX // only PAX supports xattrs
+		}
 		if err := tw.WriteHeader(&tar.Header{
 			Typeflag: tar.TypeReg,
 			Name:     prefix + name,
@@ -1768,6 +1843,7 @@ func file(name, contents string, opts ...interface{}) tarEntry {
 			Size:     int64(len(contents)),
 			Uid:      o.uid,
 			Gid:      o.gid,
+			Format:   format,
 		}); err != nil {
 			return err
 		}
@@ -1777,78 +1853,76 @@ func file(name, contents string, opts ...interface{}) tarEntry {
 }
 
 func symlink(name, target string) tarEntry {
-	return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
 		return tw.WriteHeader(&tar.Header{
 			Typeflag: tar.TypeSymlink,
 			Name:     prefix + name,
 			Linkname: target,
 			Mode:     0644,
+			Format:   format,
 		})
 	})
 }
 
 func link(name string, linkname string) tarEntry {
 	now := time.Now()
-	return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 		return w.WriteHeader(&tar.Header{
-			Typeflag:   tar.TypeLink,
-			Name:       prefix + name,
-			Linkname:   linkname,
-			ModTime:    now,
-			AccessTime: now,
-			ChangeTime: now,
+			Typeflag: tar.TypeLink,
+			Name:     prefix + name,
+			Linkname: linkname,
+			ModTime:  now,
+			Format:   format,
 		})
 	})
 }
 
 func chardev(name string, major, minor int64) tarEntry {
 	now := time.Now()
-	return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 		return w.WriteHeader(&tar.Header{
-			Typeflag:   tar.TypeChar,
-			Name:       prefix + name,
-			Devmajor:   major,
-			Devminor:   minor,
-			ModTime:    now,
-			AccessTime: now,
-			ChangeTime: now,
+			Typeflag: tar.TypeChar,
+			Name:     prefix + name,
+			Devmajor: major,
+			Devminor: minor,
+			ModTime:  now,
+			Format:   format,
 		})
 	})
 }
 
 func blockdev(name string, major, minor int64) tarEntry {
 	now := time.Now()
-	return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 		return w.WriteHeader(&tar.Header{
-			Typeflag:   tar.TypeBlock,
-			Name:       prefix + name,
-			Devmajor:   major,
-			Devminor:   minor,
-			ModTime:    now,
-			AccessTime: now,
-			ChangeTime: now,
+			Typeflag: tar.TypeBlock,
+			Name:     prefix + name,
+			Devmajor: major,
+			Devminor: minor,
+			ModTime:  now,
+			Format:   format,
 		})
 	})
 }
 func fifo(name string) tarEntry {
 	now := time.Now()
-	return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 		return w.WriteHeader(&tar.Header{
-			Typeflag:   tar.TypeFifo,
-			Name:       prefix + name,
-			ModTime:    now,
-			AccessTime: now,
-			ChangeTime: now,
+			Typeflag: tar.TypeFifo,
+			Name:     prefix + name,
+			ModTime:  now,
+			Format:   format,
 		})
 	})
 }
 
 func prefetchLandmark() tarEntry {
-	return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 		if err := w.WriteHeader(&tar.Header{
 			Name:     PrefetchLandmark,
 			Typeflag: tar.TypeReg,
 			Size:     int64(len([]byte{landmarkContents})),
+			Format:   format,
 		}); err != nil {
 			return err
 		}
@@ -1861,11 +1935,12 @@ func prefetchLandmark() tarEntry {
 }
 
 func noPrefetchLandmark() tarEntry {
-	return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 		if err := w.WriteHeader(&tar.Header{
 			Name:     NoPrefetchLandmark,
 			Typeflag: tar.TypeReg,
 			Size:     int64(len([]byte{landmarkContents})),
+			Format:   format,
 		}); err != nil {
 			return err
 		}
@@ -1899,11 +1974,12 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
 		n += size
 	}
 
-	return tarEntryFunc(func(w *tar.Writer, prefix string) error {
+	return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
 		if err := w.WriteHeader(&tar.Header{
 			Typeflag: tar.TypeReg,
 			Name:     prefix + name,
 			Size:     int64(len(content)),
+			Format:   format,
 		}); err != nil {
 			return err
 		}
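Note: the test-helper rewrite above threads an explicit `tar.Format` through every synthetic entry so the same fixtures can be generated as USTAR, PAX, or GNU archives. A standalone sketch of that pattern using only the standard library (`buildSample` is an illustrative name, not a helper from this file):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

// buildSample writes one regular file into an in-memory tar, forcing the
// requested header format instead of leaving it as tar.FormatUnknown.
func buildSample(format tar.Format) ([]byte, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	if err := tw.WriteHeader(&tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "foo.txt",
		Size:     int64(len("bar")),
		Mode:     0644,
		Format:   format, // propagated explicitly, as the updated helpers do
	}); err != nil {
		return nil, err
	}
	if _, err := tw.Write([]byte("bar")); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	// Same three formats the updated tests iterate over.
	for _, f := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
		b, err := buildSample(f)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %d bytes\n", f, len(b))
	}
}
```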
6 changes: vendor/github.com/containerd/stargz-snapshotter/estargz/types.go (generated, vendored)
@@ -290,7 +290,7 @@ type Compressor interface {
 	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
 }
 
-// Deompressor represents the helper mothods to be used for parsing eStargz.
+// Decompressor represents the helper mothods to be used for parsing eStargz.
 type Decompressor interface {
 	// Reader returns ReadCloser to be used for decompressing file payload.
 	Reader(r io.Reader) (io.ReadCloser, error)
@@ -299,10 +299,12 @@ type Decompressor interface {
 	FooterSize() int64
 
 	// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+	// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
+	// the top until the TOC JSON).
 	//
 	// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
 	// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
-	ParseFooter(p []byte) (tocOffset, tocSize int64, err error)
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
 
 	// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
 	// of the underlying blob that has the range specified by ParseFooter method.
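Note: per the updated `ParseFooter` contract, `tocSize <= 0` means the TOC extends from `tocOffset` up to the beginning of the footer (blob size - tocOff - FooterSize). A minimal sketch of that normalization; the helper name is invented for illustration:

```go
package main

import "fmt"

// effectiveTOCSize normalizes the optional tocSize value returned by a
// Decompressor: non-positive means "from tocOffset up to the footer".
func effectiveTOCSize(blobSize, tocOffset, tocSize, footerSize int64) int64 {
	if tocSize <= 0 {
		return blobSize - tocOffset - footerSize
	}
	return tocSize
}

func main() {
	// e.g. a blob of 10_485_760 bytes whose TOC starts at 10_000_000,
	// followed by a 51-byte footer (sizes are illustrative).
	fmt.Println(effectiveTOCSize(10_485_760, 10_000_000, 0, 51))
}
```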