Bump github.com/containers/storage from 1.38.2 to 1.39.0

Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.38.2 to 1.39.0.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/main/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.38.2...v1.39.0)

---
updated-dependencies:
- dependency-name: github.com/containers/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
dependabot[bot] 2022-03-24 17:14:41 +00:00 committed by GitHub
parent cc2445de81
commit 7f6b0e39d0
49 changed files with 2529 additions and 567 deletions
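For a vendored tree like skopeo's, a bump of this kind is normally reproduced with the standard Go toolchain: `go get github.com/containers/storage@v1.39.0`, then `go mod tidy` and `go mod vendor`, which regenerates the vendored files shown below.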

go.mod

@@ -3,16 +3,13 @@ module github.com/containers/skopeo
go 1.15
require (
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/containerd/containerd v1.6.1 // indirect
github.com/containers/common v0.47.4
github.com/containers/image/v5 v5.20.0
github.com/containers/ocicrypt v1.1.3
github.com/containers/storage v1.38.2
github.com/containers/storage v1.39.0
github.com/docker/docker v20.10.14+incompatible
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/klauspost/compress v1.15.0 // indirect
github.com/moby/sys/mountinfo v0.6.0 // indirect
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
github.com/opencontainers/image-tools v1.0.0-rc3

go.sum

@@ -269,8 +269,9 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.11.0 h1:t0IW5kOmY7AXDAWRUs2uVzDhijAUOAYVr/dyRhOQvBg=
github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -307,8 +308,9 @@ github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.3 h1:uMxn2wTb4nDR7GqG3rnZSfpJXqWURfzZ7nKydzIeKpA=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/storage v1.38.2 h1:8bAIxnVBGKzMw5EWCivVj24bztQT6IkDp4uHiyhnzwE=
github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
github.com/containers/storage v1.39.0 h1:NV93CVx6KAQ04cldeJyqa7uDZivhmO3rXla1cyn75dk=
github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -672,8 +674,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=


@@ -26,6 +26,7 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -38,7 +39,6 @@ import (
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
@@ -142,7 +142,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
defer func() {
if rErr != nil {
if err := layerFiles.CleanupAll(); err != nil {
rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
}
}
}()
@@ -307,7 +307,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
// Import tar file.
intar, err := importTar(in)
if err != nil {
return nil, errors.Wrap(err, "failed to sort")
return nil, fmt.Errorf("failed to sort: %w", err)
}
// Sort the tar file respecting to the prioritized files list.
@@ -318,7 +318,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
*missedPrioritized = append(*missedPrioritized, l)
continue // allow not found
}
return nil, errors.Wrap(err, "failed to sort tar entries")
return nil, fmt.Errorf("failed to sort tar entries: %w", err)
}
}
if len(prioritized) == 0 {
@@ -371,7 +371,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{}
pw, err := newCountReader(in)
if err != nil {
return nil, errors.Wrap(err, "failed to make position watcher")
return nil, fmt.Errorf("failed to make position watcher: %w", err)
}
tr := tar.NewReader(pw)
@@ -383,7 +383,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
if err == io.EOF {
break
} else {
return nil, errors.Wrap(err, "failed to parse tar file")
return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
}
switch cleanEntryName(h.Name) {
@@ -420,7 +420,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error {
_, okIn := in.get(name)
_, okOut := out.get(name)
if !okIn && !okOut {
return errors.Wrapf(errNotFound, "file: %q", name)
return fmt.Errorf("file: %q: %w", name, errNotFound)
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))

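A recurring change throughout this vendored estargz code is the migration from `github.com/pkg/errors` to Go 1.13-style wrapping with `fmt.Errorf` and the `%w` verb. A minimal sketch of why this is safe for callers that match on sentinel errors:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func main() {
	// Before: errors.Wrapf(errNotFound, "file: %q", name)
	// After:  fmt.Errorf with %w, as in the diff above.
	err := fmt.Errorf("file: %q: %w", "foo/bar", errNotFound)

	// %w records the wrapped error, so sentinel checks still work.
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```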

@@ -27,6 +27,7 @@ import (
"bytes"
"compress/gzip"
"crypto/sha256"
"errors"
"fmt"
"hash"
"io"
@@ -40,7 +41,6 @@ import (
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/archive/tar"
)
@@ -385,8 +385,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
if e.Digest != "" {
d, err := digest.Parse(e.Digest)
if err != nil {
return nil, errors.Wrapf(err,
"failed to parse regular file digest %q", e.Digest)
return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
}
regDigestMap[e.Offset] = d
} else {
@@ -401,8 +400,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
if e.ChunkDigest != "" {
d, err := digest.Parse(e.ChunkDigest)
if err != nil {
return nil, errors.Wrapf(err,
"failed to parse chunk digest %q", e.ChunkDigest)
return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
}
chunkDigestMap[e.Offset] = d
} else {
@@ -647,7 +645,7 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
}
blobPayloadSize, _, _, err := c.ParseFooter(footer)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse footer")
return nil, fmt.Errorf("failed to parse footer: %w", err)
}
return c.Reader(io.LimitReader(sr, blobPayloadSize))
}


@@ -3,9 +3,8 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16
require (
github.com/klauspost/compress v1.14.2
github.com/klauspost/compress v1.15.1
github.com/opencontainers/go-digest v1.0.0
github.com/pkg/errors v0.9.1
github.com/vbatts/tar-split v0.11.2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
)


@@ -1,12 +1,10 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=


@@ -34,7 +34,6 @@ import (
"strconv"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type gzipCompression struct {
@@ -150,7 +149,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t
}
tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
if err != nil {
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
return tocOffset, tocOffset, 0, nil
}
@@ -179,7 +178,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
}
defer zr.Close()
extra := zr.Header.Extra
@@ -191,7 +190,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
}
tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
if err != nil {
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
}
return tocOffset, tocOffset, 0, nil
}


@@ -28,6 +28,7 @@ import (
"compress/gzip"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -41,7 +42,6 @@ import (
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// TestingController is Compression with some helper methods necessary for testing.
@@ -1062,18 +1062,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
fSize := controller.FooterSize()
footer := make([]byte, fSize)
if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
return nil, 0, errors.Wrap(err, "error reading footer")
return nil, 0, fmt.Errorf("error reading footer: %w", err)
}
_, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
if err != nil {
return nil, 0, errors.Wrapf(err, "failed to parse footer")
return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
}
// Decode the TOC JSON
tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
decodedJTOC, _, err = controller.ParseTOC(tocReader)
if err != nil {
return nil, 0, errors.Wrap(err, "failed to parse TOC")
return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
}
return decodedJTOC, tocOffset, nil
}


@@ -1 +1 @@
1.38.2
1.39.0


@@ -84,8 +84,17 @@ type ContainerStore interface {
// SetNames updates the list of names associated with the container
// with the specified ID.
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
// AddNames adds the supplied values to the list of names associated with the container with
// the specified id.
AddNames(id string, names []string) error
// RemoveNames removes the supplied values from the list of names associated with the container with
// the specified id.
RemoveNames(id string, names []string) error
// Get retrieves information about a container given an ID or name.
Get(id string) (*Container, error)
@@ -324,6 +333,12 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID))
}
}
if err := hasOverlappingRanges(options.UIDMap); err != nil {
return nil, err
}
if err := hasOverlappingRanges(options.GIDMap); err != nil {
return nil, err
}
if err == nil {
container = &Container{
ID: id,
@@ -371,22 +386,40 @@ func (r *containerStore) removeName(container *Container, name string) {
container.Names = stringSliceWithoutValue(container.Names, name)
}
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (r *containerStore) SetNames(id string, names []string) error {
names = dedupeNames(names)
if container, ok := r.lookup(id); ok {
for _, name := range container.Names {
delete(r.byname, name)
}
for _, name := range names {
if otherContainer, ok := r.byname[name]; ok {
r.removeName(otherContainer, name)
}
r.byname[name] = container
}
container.Names = names
return r.Save()
return r.updateNames(id, names, setNames)
}
func (r *containerStore) AddNames(id string, names []string) error {
return r.updateNames(id, names, addNames)
}
func (r *containerStore) RemoveNames(id string, names []string) error {
return r.updateNames(id, names, removeNames)
}
func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error {
container, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
}
return ErrContainerUnknown
oldNames := container.Names
names, err := applyNameOperation(oldNames, names, op)
if err != nil {
return err
}
for _, name := range oldNames {
delete(r.byname, name)
}
for _, name := range names {
if otherContainer, ok := r.byname[name]; ok {
r.removeName(otherContainer, name)
}
r.byname[name] = container
}
container.Names = names
return r.Save()
}
func (r *containerStore) Delete(id string) error {

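All three name setters (`SetNames`, `AddNames`, `RemoveNames`) now funnel into a single `updateNames` helper parameterized by an operation. The `applyNameOperation` helper it calls is not part of this diff; the sketch below is a plausible reading of its semantics, reusing the `updateNameOperation` constants and the `errInvalidUpdateNameOperation` sentinel that do appear in this change:

```go
package storagesketch

import "errors"

type updateNameOperation int

const (
	setNames updateNameOperation = iota
	addNames
	removeNames
)

var errInvalidUpdateNameOperation = errors.New("invalid update name operation")

// dedupeNames stands in for the store helper of the same name: it
// drops duplicates while preserving first-seen order.
func dedupeNames(names []string) []string {
	seen := make(map[string]bool, len(names))
	out := make([]string, 0, len(names))
	for _, n := range names {
		if !seen[n] {
			seen[n] = true
			out = append(out, n)
		}
	}
	return out
}

// applyNameOperation is hypothetical; the real helper lives elsewhere
// in containers/storage and is not shown in this diff.
func applyNameOperation(oldNames, names []string, op updateNameOperation) ([]string, error) {
	switch op {
	case setNames:
		return dedupeNames(names), nil
	case addNames:
		return dedupeNames(append(append([]string{}, oldNames...), names...)), nil
	case removeNames:
		remove := make(map[string]bool, len(names))
		for _, n := range names {
			remove[n] = true
		}
		kept := make([]string, 0, len(oldNames))
		for _, n := range oldNames {
			if !remove[n] {
				kept = append(kept, n)
			}
		}
		return kept, nil
	default:
		return nil, errInvalidUpdateNameOperation
	}
}
```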

@@ -50,11 +50,14 @@ func chownByMapsMain() {
if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 {
toHost = nil
}
chowner := newLChowner()
chown := func(path string, info os.FileInfo, _ error) error {
if path == "." {
return nil
}
return platformLChown(path, info, toHost, toContainer)
return chowner.LChown(path, info, toHost, toContainer)
}
if err := pwalk.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)


@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package graphdriver
@@ -6,17 +7,50 @@ import (
"errors"
"fmt"
"os"
"sync"
"syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
)
func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
type inode struct {
Dev uint64
Ino uint64
}
type platformChowner struct {
mutex sync.Mutex
inodes map[inode]bool
}
func newLChowner() *platformChowner {
return &platformChowner{
inodes: make(map[inode]bool),
}
}
func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
st, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
i := inode{
Dev: uint64(st.Dev),
Ino: uint64(st.Ino),
}
c.mutex.Lock()
_, found := c.inodes[i]
if !found {
c.inodes[i] = true
}
c.mutex.Unlock()
if found {
return nil
}
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to

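The new `platformChowner` exists to remember `(device, inode)` pairs across the walk, so a file with several hard links is chowned only once. The same dedup pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

type inode struct {
	Dev uint64
	Ino uint64
}

// tracker reports whether an inode is being visited for the first
// time; later visits (hard links to the same file) return false.
type tracker struct {
	mu     sync.Mutex
	inodes map[inode]bool
}

func (t *tracker) firstVisit(i inode) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.inodes[i] {
		return false
	}
	t.inodes[i] = true
	return true
}

func main() {
	t := &tracker{inodes: make(map[inode]bool)}
	a := inode{Dev: 1, Ino: 42}
	fmt.Println(t.firstVisit(a)) // true: do the chown
	fmt.Println(t.firstVisit(a)) // false: hard link, skip it
}
```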

@@ -1,3 +1,4 @@
//go:build windows
// +build windows
package graphdriver
@@ -9,6 +10,13 @@ import (
"github.com/containers/storage/pkg/idtools"
)
func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
type platformChowner struct {
}
func newLChowner() *platformChowner {
return &platformChowner{}
}
func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
return &os.PathError{"lchown", path, syscall.EWINDOWS}
}


@@ -1,3 +1,4 @@
//go:build linux
// +build linux
package overlay
@@ -291,6 +292,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
backingFs = fsName
}
runhome := filepath.Join(options.RunRoot, filepath.Base(home))
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
return nil, err
}
if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
return nil, err
}
if opts.mountProgram == "" {
if supported, err := SupportsNativeOverlay(home, runhome); err != nil {
return nil, err
} else if !supported {
if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
opts.mountProgram = path
}
}
}
if opts.mountProgram != "" {
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
m := os.FileMode(0700)
@@ -315,20 +341,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
return nil, err
}
runhome := filepath.Join(options.RunRoot, filepath.Base(home))
if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
return nil, err
}
var usingMetacopy bool
var supportsDType bool
var supportsVolatile *bool
@@ -568,14 +580,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) (
return err
}
func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
if os.Geteuid() != 0 || graphroot == "" || rundir == "" {
func SupportsNativeOverlay(home, runhome string) (bool, error) {
if os.Geteuid() != 0 || home == "" || runhome == "" {
return false, nil
}
home := filepath.Join(graphroot, "overlay")
runhome := filepath.Join(rundir, "overlay")
var contents string
flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
if err == nil {
@@ -919,7 +928,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
defer func() {
// Clean up on failure
if retErr != nil {
os.RemoveAll(dir)
if err2 := os.RemoveAll(dir); err2 != nil {
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
}
}
}()
@@ -1166,6 +1177,9 @@ func (d *Driver) Remove(id string) error {
// under each layer has a symlink created for it under the linkDir. If the symlink does not
// exist, it creates them
func (d *Driver) recreateSymlinks() error {
// We have at most 3 corrective actions per layer, so 10 iterations is plenty.
const maxIterations = 10
// List all the directories under the home directory
dirs, err := ioutil.ReadDir(d.home)
if err != nil {
@@ -1183,6 +1197,7 @@ func (d *Driver) recreateSymlinks() error {
// Keep looping as long as we take some corrective action in each iteration
var errs *multierror.Error
madeProgress := true
iterations := 0
for madeProgress {
errs = nil
madeProgress = false
@@ -1233,7 +1248,12 @@
if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target))
// force the link to be recreated on the next pass
os.Remove(filepath.Join(linksDir, link.Name()))
if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil {
if !os.IsNotExist(err) {
errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link))
} // else don't report any error, but also don't set madeProgress.
continue
}
madeProgress = true
continue
}
@@ -1243,6 +1263,8 @@
linkFile := filepath.Join(d.dir(targetID), "link")
data, err := ioutil.ReadFile(linkFile)
if err != nil || string(data) != link.Name() {
// NOTE: If two or more links point to the same target, we will update linkFile
// with every value of link.Name(), and set madeProgress = true every time.
if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID))
continue
@@ -1250,6 +1272,11 @@
madeProgress = true
}
}
iterations++
if iterations >= maxIterations {
errs = multierror.Append(errs, fmt.Errorf("Reached %d iterations in overlay graph drivers recreateSymlink, giving up", iterations))
break
}
}
if errs != nil {
return errs.ErrorOrNil()
@@ -1443,6 +1470,21 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
workdir := path.Join(dir, "work")
if d.options.mountProgram == "" && unshare.IsRootless() {
optsList = append(optsList, "userxattr")
}
if options.Volatile && !hasVolatileOption(optsList) {
supported, err := d.getSupportsVolatile()
if err != nil {
return "", err
}
// If "volatile" is not supported by the file system, just ignore the request
if supported {
optsList = append(optsList, "volatile")
}
}
var opts string
if readWrite {
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
@@ -1450,22 +1492,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
}
if len(optsList) > 0 {
opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts)
}
if d.options.mountProgram == "" && unshare.IsRootless() {
opts = fmt.Sprintf("%s,userxattr", opts)
}
// If "volatile" is not supported by the file system, just ignore the request
if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) {
supported, err := d.getSupportsVolatile()
if err != nil {
return "", err
}
if supported {
opts = fmt.Sprintf("%s,volatile", opts)
}
opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
}
mountData := label.FormatMountLabel(opts, options.MountLabel)
@@ -1474,10 +1501,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
pageSize := unix.Getpagesize()
// Use relative paths and mountFrom when the mount data has exceeded
// the page size. The mount syscall fails if the mount data cannot
// fit within a page and relative links make the mount data much
// smaller at the expense of requiring a fork exec to chroot.
if d.options.mountProgram != "" {
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
if !disableShifting {
@@ -1503,18 +1526,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
return nil
}
} else if len(mountData) > pageSize {
} else if len(mountData) >= pageSize {
// Use relative paths and mountFrom when the mount data has exceeded
// the page size. The mount syscall fails if the mount data cannot
// fit within a page and relative links make the mount data much
// smaller at the expense of requiring a fork exec to chroot.
workdir = path.Join(id, "work")
//FIXME: We need to figure out how to get this to work with additional stores
if readWrite {
diffDir := path.Join(id, "diff")
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
} else {
opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":"))
}
if len(optsList) > 0 {
opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
}
mountData = label.FormatMountLabel(opts, options.MountLabel)
if len(mountData) > pageSize {
return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize)
if len(mountData) >= pageSize {
return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize)
}
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
return mountFrom(d.home, source, target, mType, flags, label)

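Two behavioral notes on the mount-path changes above: the volatile check now runs before the option list is joined, and the page-size comparison changes from `>` to `>=`, presumably because the kernel copies mount options into a single page and a string of exactly one page leaves no room for a terminating NUL. A rough sketch of the boundary check, using the same `unix.Getpagesize` call the driver uses:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pageSize := unix.Getpagesize()
	mountData := "lowerdir=/a:/b,upperdir=/u,workdir=/w"

	// With >=, data of exactly pageSize bytes is rejected as well.
	if len(mountData) >= pageSize {
		fmt.Printf("mount data too large: %d >= %d\n", len(mountData), pageSize)
		return
	}
	fmt.Println("mount data fits in a page")
}
```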

@@ -1,6 +1,8 @@
package storage
import (
"errors"
"github.com/containers/storage/types"
)
@@ -55,4 +57,9 @@ var (
ErrStoreIsReadOnly = types.ErrStoreIsReadOnly
// ErrNotSupported is returned when the requested functionality is not supported.
ErrNotSupported = types.ErrNotSupported
// ErrInvalidMappings is returned when the specified mappings are invalid.
ErrInvalidMappings = types.ErrInvalidMappings
// ErrInvalidNameOperation is returned when updateName is called with invalid operation.
// Internal error
errInvalidUpdateNameOperation = errors.New("invalid update name operation")
)


@@ -4,26 +4,26 @@ module github.com/containers/storage
require (
github.com/BurntSushi/toml v1.0.0
github.com/Microsoft/go-winio v0.5.1
github.com/Microsoft/go-winio v0.5.2
github.com/Microsoft/hcsshim v0.9.2
github.com/containerd/stargz-snapshotter/estargz v0.11.0
github.com/containerd/stargz-snapshotter/estargz v0.11.3
github.com/cyphar/filepath-securejoin v0.2.3
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.14.2
github.com/klauspost/compress v1.15.1
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.5.0
github.com/moby/sys/mountinfo v0.6.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.1.0
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.10.0
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.7.0
github.com/stretchr/testify v1.7.1
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/ulikunitz/xz v0.5.10


@@ -47,8 +47,8 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -176,8 +176,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.11.0 h1:t0IW5kOmY7AXDAWRUs2uVzDhijAUOAYVr/dyRhOQvBg=
github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -424,8 +424,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -466,8 +466,9 @@ github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQ
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -621,8 +622,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=


@@ -1,6 +1,9 @@
package storage
import (
"fmt"
"strings"
"github.com/containers/storage/pkg/idtools"
"github.com/google/go-intervals/intervalset"
"github.com/pkg/errors"
@@ -218,3 +221,45 @@ func maxInt(a, b int) int {
}
return a
}
func hasOverlappingRanges(mappings []idtools.IDMap) error {
hostIntervals := intervalset.Empty()
containerIntervals := intervalset.Empty()
var conflicts []string
for _, m := range mappings {
c := interval{start: m.ContainerID, end: m.ContainerID + m.Size}
h := interval{start: m.HostID, end: m.HostID + m.Size}
added := false
overlaps := false
containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool {
overlaps = true
return false
})
if overlaps {
conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
added = true
}
containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c}))
hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool {
overlaps = true
return false
})
if overlaps && !added {
conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
}
hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h}))
}
if conflicts != nil {
if len(conflicts) == 1 {
return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0])
}
return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", "))
}
return nil
}

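`hasOverlappingRanges` backs the new validation in `containerStore.Create` above: any two mappings whose container ranges or host ranges intersect are rejected with `ErrInvalidMappings`. A self-contained sketch of the half-open interval test this amounts to (the real code builds interval sets with `google/go-intervals` rather than comparing pairs directly):

```go
package main

import "fmt"

type idMap struct{ ContainerID, HostID, Size int }

// overlaps reports whether [aStart, aStart+aSize) and
// [bStart, bStart+bSize) intersect.
func overlaps(aStart, aSize, bStart, bSize int) bool {
	return aStart < bStart+bSize && bStart < aStart+aSize
}

func main() {
	a := idMap{ContainerID: 0, HostID: 1000, Size: 1000}
	b := idMap{ContainerID: 1000, HostID: 1500, Size: 1000}

	// Container ranges [0,1000) and [1000,2000) are disjoint, but
	// host ranges [1000,2000) and [1500,2500) collide, so a store
	// would reject this pair.
	fmt.Println(overlaps(a.ContainerID, a.Size, b.ContainerID, b.Size)) // false
	fmt.Println(overlaps(a.HostID, a.Size, b.HostID, b.Size))           // true
}
```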

@@ -136,8 +136,19 @@ type ImageStore interface {
// SetNames replaces the list of names associated with an image with the
// supplied values. The values are expected to be valid normalized
// named image references.
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
// AddNames adds the supplied values to the list of names associated with the image with
// the specified id. The values are expected to be valid normalized
// named image references.
AddNames(id string, names []string) error
// RemoveNames removes the supplied values from the list of names associated with the image with
// the specified id. The values are expected to be valid normalized
// named image references.
RemoveNames(id string, names []string) error
// Delete removes the record of the image.
Delete(id string) error
@@ -425,37 +436,36 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
if created.IsZero() {
created = time.Now().UTC()
}
if err == nil {
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: nil,
Names: names,
TopLayer: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: created,
Flags: make(map[string]interface{}),
}
err := image.recomputeDigests()
if err != nil {
return nil, errors.Wrapf(err, "error validating digests for new image")
}
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
for _, name := range names {
r.byname[name] = image
}
for _, digest := range image.Digests {
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
err = r.Save()
image = copyImage(image)
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: nil,
Names: names,
TopLayer: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: created,
Flags: make(map[string]interface{}),
}
err = image.recomputeDigests()
if err != nil {
return nil, errors.Wrapf(err, "error validating digests for new image")
}
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
for _, name := range names {
r.byname[name] = image
}
for _, digest := range image.Digests {
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
err = r.Save()
image = copyImage(image)
return image, err
}
@@ -506,26 +516,44 @@ func (i *Image) addNameToHistory(name string) {
i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...))
}
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (r *imageStore) SetNames(id string, names []string) error {
return r.updateNames(id, names, setNames)
}
func (r *imageStore) AddNames(id string, names []string) error {
return r.updateNames(id, names, addNames)
}
func (r *imageStore) RemoveNames(id string, names []string) error {
return r.updateNames(id, names, removeNames)
}
func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
}
names = dedupeNames(names)
if image, ok := r.lookup(id); ok {
for _, name := range image.Names {
delete(r.byname, name)
}
for _, name := range names {
if otherImage, ok := r.byname[name]; ok {
r.removeName(otherImage, name)
}
r.byname[name] = image
image.addNameToHistory(name)
}
image.Names = names
return r.Save()
image, ok := r.lookup(id)
if !ok {
return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
oldNames := image.Names
names, err := applyNameOperation(oldNames, names, op)
if err != nil {
return err
}
for _, name := range oldNames {
delete(r.byname, name)
}
for _, name := range names {
if otherImage, ok := r.byname[name]; ok {
r.removeName(otherImage, name)
}
r.byname[name] = image
image.addNameToHistory(name)
}
image.Names = names
return r.Save()
}
func (r *imageStore) Delete(id string) error {


@@ -221,8 +221,17 @@ type LayerStore interface {
// SetNames replaces the list of names associated with a layer with the
// supplied values.
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
// AddNames adds the supplied values to the list of names associated with the layer with the
// specified id.
AddNames(id string, names []string) error
// RemoveNames remove the supplied values from the list of names associated with the layer with the
// specified id.
RemoveNames(id string, names []string) error
// Delete deletes a layer with the specified name or ID.
Delete(id string) error
@@ -399,14 +408,13 @@ func (r *layerStore) Load() error {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
if cleanup, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := cleanup.(bool); ok && b {
err = r.deleteInternal(layer.ID)
if err != nil {
break
}
shouldSave = true
if layerHasIncompleteFlag(layer) {
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
err = r.deleteInternal(layer.ID)
if err != nil {
break
}
shouldSave = true
}
}
}
@@ -742,26 +750,17 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
}
if moreOptions.TemplateLayer != "" {
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
}
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer)
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
}
oldMappings = templateIDMappings
} else {
if writeable {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
}
return nil, -1, errors.Wrapf(err, "error creating read-write layer")
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
}
} else {
if err = r.driver.Create(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
}
return nil, -1, errors.Wrapf(err, "error creating layer")
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
}
}
oldMappings = parentMappings
@@ -770,7 +769,9 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
r.driver.Remove(id)
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
}
return nil, -1, err
}
}
@@ -795,21 +796,26 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
for flag, value := range flags {
layer.Flags[flag] = value
}
savedIncompleteLayer := false
if diff != nil {
layer.Flags[incompleteFlag] = true
err = r.Save()
if err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
r.driver.Remove(id)
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure saving incomplete layer metadata, error deleting layer %#v: %v", id, err2)
}
return nil, -1, err
}
savedIncompleteLayer = true
size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
if r.Delete(layer.ID) != nil {
if err2 := r.Delete(layer.ID); err2 != nil {
// Either a driver error or an error saving.
// We now have a layer that's been marked for
// deletion but which we failed to remove.
logrus.Errorf("While recovering from a failure applying layer diff, error deleting layer %#v: %v", layer.ID, err2)
}
return nil, -1, err
}
@@ -817,9 +823,20 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
}
err = r.Save()
if err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
r.driver.Remove(id)
if savedIncompleteLayer {
if err2 := r.Delete(layer.ID); err2 != nil {
// Either a driver error or an error saving.
// We now have a layer that's been marked for
// deletion but which we failed to remove.
logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v: %v", layer.ID, err2)
}
} else {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v in graph driver: %v", id, err2)
}
}
return nil, -1, err
}
layer = copyLayer(layer)
@@ -1032,25 +1049,43 @@ func (r *layerStore) removeName(layer *Layer, name string) {
layer.Names = stringSliceWithoutValue(layer.Names, name)
}
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (r *layerStore) SetNames(id string, names []string) error {
return r.updateNames(id, names, setNames)
}
func (r *layerStore) AddNames(id string, names []string) error {
return r.updateNames(id, names, addNames)
}
func (r *layerStore) RemoveNames(id string, names []string) error {
return r.updateNames(id, names, removeNames)
}
func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
}
names = dedupeNames(names)
if layer, ok := r.lookup(id); ok {
for _, name := range layer.Names {
delete(r.byname, name)
}
for _, name := range names {
if otherLayer, ok := r.byname[name]; ok {
r.removeName(otherLayer, name)
}
r.byname[name] = layer
}
layer.Names = names
return r.Save()
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
}
return ErrLayerUnknown
oldNames := layer.Names
names, err := applyNameOperation(oldNames, names, op)
if err != nil {
return err
}
for _, name := range oldNames {
delete(r.byname, name)
}
for _, name := range names {
if otherLayer, ok := r.byname[name]; ok {
r.removeName(otherLayer, name)
}
r.byname[name] = layer
}
layer.Names = names
return r.Save()
}
func (r *layerStore) datadir(id string) string {
@@ -1149,6 +1184,17 @@ func (r *layerStore) tspath(id string) string {
return filepath.Join(r.layerdir, id+tarSplitSuffix)
}
// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
func layerHasIncompleteFlag(layer *Layer) bool {
// layer.Flags[…] is defined to succeed and return ok == false if Flags == nil
if flagValue, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := flagValue.(bool); ok && b {
return true
}
}
return false
}
func (r *layerStore) deleteInternal(id string) error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
@@ -1157,6 +1203,18 @@ func (r *layerStore) deleteInternal(id string) error {
if !ok {
return ErrLayerUnknown
}
// Ensure that if we are interrupted, the layer will be cleaned up.
if !layerHasIncompleteFlag(layer) {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
layer.Flags[incompleteFlag] = true
if err := r.Save(); err != nil {
return err
}
}
// We never unset incompleteFlag; below, we remove the entire object from r.layers.
id = layer.ID
err := r.driver.Remove(id)
if err != nil {

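`deleteInternal` now persists the incomplete flag before touching the layer's data, so a crash mid-delete leaves a marker that the `Load` change above turns into a retried cleanup on the next start. The ordering, reduced to a sketch with hypothetical stand-ins for `r.Save` and `r.driver.Remove`:

```go
package storagesketch

// deleteLayer sketches the crash-safe ordering; save and remove are
// hypothetical stand-ins for r.Save and r.driver.Remove, and the
// "incomplete" key stands in for the store's incompleteFlag.
func deleteLayer(flags map[string]interface{}, save, remove func() error) error {
	// Persist the marker first: if the process dies after this point,
	// the next Load() sees the flag and finishes the deletion.
	if b, ok := flags["incomplete"].(bool); !ok || !b {
		flags["incomplete"] = true
		if err := save(); err != nil {
			return err
		}
	}
	// Only then remove the on-disk data. The flag is never unset;
	// the whole layer record is dropped instead.
	return remove()
}
```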

@@ -108,35 +108,32 @@ func (c *layersCache) load() error {
}
bigData, err := c.store.LayerBigData(r.ID, cacheKey)
if err != nil {
if errors.Cause(err) == os.ErrNotExist {
// if the cache already exists, read and use it
if err == nil {
defer bigData.Close()
metadata, err := readMetadataFromCache(bigData)
if err == nil {
c.addLayer(r.ID, metadata)
continue
}
logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
} else if errors.Cause(err) != os.ErrNotExist {
return err
}
defer bigData.Close()
metadata, err := readMetadataFromCache(bigData)
if err != nil {
logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
}
if metadata != nil {
c.addLayer(r.ID, metadata)
continue
}
// otherwise create it from the layer TOC.
manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
if err != nil {
continue
}
defer manifestReader.Close()
manifest, err := ioutil.ReadAll(manifestReader)
if err != nil {
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
}
metadata, err = writeCache(manifest, r.ID, c.store)
metadata, err := writeCache(manifest, r.ID, c.store)
if err == nil {
c.addLayer(r.ID, metadata)
}


@@ -1248,7 +1248,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
func checkChownErr(err error, name string, uid, gid int) error {
if errors.Is(err, syscall.EINVAL) {
return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally: %w", uid, gid, name, err)
return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err)
}
return err
}


@@ -12,109 +12,109 @@ type ThinpoolOptionsConfig struct {
// grown. This is specified in terms of % of pool size. So a value of
// 20 means that when threshold is hit, pool will be grown by 20% of
// existing pool size.
AutoExtendPercent string `toml:"autoextend_percent"`
AutoExtendPercent string `toml:"autoextend_percent,omitempty"`
// AutoExtendThreshold determines the pool extension threshold in terms
// of percentage of pool size. For example, if threshold is 60, that
// means when pool is 60% full, threshold has been hit.
AutoExtendThreshold string `toml:"autoextend_threshold"`
AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
// BaseSize specifies the size to use when creating the base device,
// which limits the size of images and containers.
BaseSize string `toml:"basesize"`
BaseSize string `toml:"basesize,omitempty"`
// BlockSize specifies a custom blocksize to use for the thin pool.
BlockSize string `toml:"blocksize"`
BlockSize string `toml:"blocksize,omitempty"`
// DirectLvmDevice specifies a custom block storage device to use for
// the thin pool.
DirectLvmDevice string `toml:"directlvm_device"`
DirectLvmDevice string `toml:"directlvm_device,omitempty"`
// DirectLvmDeviceForcewipes device even if device already has a
// filesystem
DirectLvmDeviceForce string `toml:"directlvm_device_force"`
DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
// Fs specifies the filesystem type to use for the base device.
Fs string `toml:"fs"`
Fs string `toml:"fs,omitempty"`
// log_level sets the log level of devicemapper.
LogLevel string `toml:"log_level"`
LogLevel string `toml:"log_level,omitempty"`
// MetadataSize specifies the size of the metadata for the thinpool
// It will be used with the `pvcreate --metadata` option.
MetadataSize string `toml:"metadatasize"`
MetadataSize string `toml:"metadatasize,omitempty"`
// MinFreeSpace specifies the min free space percent in a thin pool
// require for new device creation to succeed
MinFreeSpace string `toml:"min_free_space"`
MinFreeSpace string `toml:"min_free_space,omitempty"`
// MkfsArg specifies extra mkfs arguments to be used when creating the
// basedevice.
MkfsArg string `toml:"mkfsarg"`
MkfsArg string `toml:"mkfsarg,omitempty"`
// MountOpt specifies extra mount options used when mounting the thin
// devices.
MountOpt string `toml:"mountopt"`
MountOpt string `toml:"mountopt,omitempty"`
// Size
Size string `toml:"size"`
Size string `toml:"size,omitempty"`
// UseDeferredDeletion marks device for deferred deletion
UseDeferredDeletion string `toml:"use_deferred_deletion"`
UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
// UseDeferredRemoval marks device for deferred removal
UseDeferredRemoval string `toml:"use_deferred_removal"`
UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
// XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
// retries XFS should attempt to complete IO when ENOSPC (no space)
// error is returned by underlying storage device.
XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
}
type AufsOptionsConfig struct {
// MountOpt specifies extra mount options used when mounting
MountOpt string `toml:"mountopt"`
MountOpt string `toml:"mountopt,omitempty"`
}
type BtrfsOptionsConfig struct {
// MinSpace is the minimal spaces allocated to the device
MinSpace string `toml:"min_space"`
MinSpace string `toml:"min_space,omitempty"`
// Size
Size string `toml:"size"`
Size string `toml:"size,omitempty"`
}
type OverlayOptionsConfig struct {
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors"`
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
// MountOpt specifies extra mount options used when mounting
MountOpt string `toml:"mountopt"`
MountOpt string `toml:"mountopt,omitempty"`
// Alternative program to use for the mount of the file system
MountProgram string `toml:"mount_program"`
MountProgram string `toml:"mount_program,omitempty"`
// Size
Size string `toml:"size"`
Size string `toml:"size,omitempty"`
// Inodes is used to set a maximum inodes of the container image.
Inodes string `toml:"inodes"`
Inodes string `toml:"inodes,omitempty"`
// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home"`
SkipMountHome string `toml:"skip_mount_home,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories
ForceMask string `toml:"force_mask"`
ForceMask string `toml:"force_mask,omitempty"`
}
type VfsOptionsConfig struct {
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors"`
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
}
type ZfsOptionsConfig struct {
// MountOpt specifies extra mount options used when mounting
MountOpt string `toml:"mountopt"`
MountOpt string `toml:"mountopt,omitempty"`
// Name is the File System name of the ZFS File system
Name string `toml:"fsname"`
Name string `toml:"fsname,omitempty"`
// Size
Size string `toml:"size"`
Size string `toml:"size,omitempty"`
}
// OptionsConfig represents the "storage.options" TOML config table.
@@ -122,82 +122,82 @@ type OptionsConfig struct {
// AdditionalImagesStores is the location of additional read/only
// Image stores. Usually used to access Networked File System
// for shared image content
AdditionalImageStores []string `toml:"additionalimagestores"`
AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`
// AdditionalLayerStores is the location of additional read/only
// Layer stores. Usually used to access Networked File System
// for shared image content
// This API is experimental and can be changed without bumping the
// major version number.
AdditionalLayerStores []string `toml:"additionallayerstores"`
AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"`
// Size
Size string `toml:"size"`
Size string `toml:"size,omitempty"`
// RemapUIDs is a list of default UID mappings to use for layers.
RemapUIDs string `toml:"remap-uids"`
RemapUIDs string `toml:"remap-uids,omitempty"`
// RemapGIDs is a list of default GID mappings to use for layers.
RemapGIDs string `toml:"remap-gids"`
RemapGIDs string `toml:"remap-gids,omitempty"`
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors"`
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
// ForceMask indicates the permissions mask (e.g. "0755") to use for new
// files and directories.
ForceMask os.FileMode `toml:"force_mask"`
ForceMask os.FileMode `toml:"force_mask,omitempty"`
// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
RemapUser string `toml:"remap-user"`
RemapUser string `toml:"remap-user,omitempty"`
// RemapGroup is the name of one or more entries in /etc/subgid which
// should be used to set up default GID mappings.
RemapGroup string `toml:"remap-group"`
RemapGroup string `toml:"remap-group,omitempty"`
// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
// /etc/subgid which should be used to set up automatically a userns.
RootAutoUsernsUser string `toml:"root-auto-userns-user"`
RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`
// AutoUsernsMinSize is the minimum size for a user namespace that is
// created automatically.
AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"`
AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"`
// AutoUsernsMaxSize is the maximum size for a user namespace that is
// created automatically.
AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"`
AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"`
// Aufs container options to be handed to aufs drivers
Aufs struct{ AufsOptionsConfig } `toml:"aufs"`
Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"`
// Btrfs container options to be handed to btrfs drivers
Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"`
Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
// Thinpool container options to be handed to thinpool drivers
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"`
// Overlay container options to be handed to overlay drivers
Overlay struct{ OverlayOptionsConfig } `toml:"overlay"`
Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
// Vfs container options to be handed to VFS drivers
Vfs struct{ VfsOptionsConfig } `toml:"vfs"`
Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"`
// Zfs container options to be handed to ZFS drivers
Zfs struct{ ZfsOptionsConfig } `toml:"zfs"`
Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"`
// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home"`
SkipMountHome string `toml:"skip_mount_home,omitempty"`
// Alternative program to use for the mount of the file system
MountProgram string `toml:"mount_program"`
MountProgram string `toml:"mount_program,omitempty"`
// MountOpt specifies extra mount options used when mounting
MountOpt string `toml:"mountopt"`
MountOpt string `toml:"mountopt,omitempty"`
// PullOptions specifies options to be handed to pull managers
// This API is experimental and can be changed without bumping the major version number.
PullOptions map[string]string `toml:"pull_options"`
PullOptions map[string]string `toml:"pull_options,omitempty"`
// DisableVolatile doesn't allow volatile mounts when it is set.
DisableVolatile bool `toml:"disable-volatile"`
DisableVolatile bool `toml:"disable-volatile,omitempty"`
}
// GetGraphDriverOptions returns the driver specific options
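The ",omitempty" change above chiefly affects writing the configuration back out (see the Save fix later in this diff): without it, toml.NewEncoder emits every zero-valued key. A minimal sketch of the effect, assuming only the BurntSushi/toml encoder this package already imports; the struct here is a stand-in, not the real OptionsConfig:

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

// stand-in struct mirroring the tag change; not the real OptionsConfig
type opts struct {
	Size        string   `toml:"size,omitempty"`
	ImageStores []string `toml:"additionalimagestores,omitempty"`
}

func main() {
	// With omitempty, zero-valued fields are skipped, so this prints only
	// size = "10G"; without it, an empty additionalimagestores array (and
	// every other unset key) would be written out as well.
	_ = toml.NewEncoder(os.Stdout).Encode(opts{Size: "10G"})
}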

View File

@ -297,7 +297,7 @@ func parseSubidFile(path, username string) (ranges, error) {
func checkChownErr(err error, name string, uid, gid int) error {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally", uid, gid, name)
return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate", uid, gid, name)
}
return err
}

View File

@ -9,6 +9,7 @@ import (
"io"
"os"
"os/exec"
"os/signal"
"os/user"
"runtime"
"strconv"
@ -484,6 +485,30 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// Finish up.
logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
// Forward SIGHUP, SIGINT, and SIGTERM to our child process.
interrupted := make(chan os.Signal, 100)
defer func() {
signal.Stop(interrupted)
close(interrupted)
}()
cmd.Hook = func(int) error {
go func() {
for receivedSignal := range interrupted {
cmd.Cmd.Process.Signal(receivedSignal)
}
}()
return nil
}
signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
// Make sure our child process gets SIGKILLed if we exit, for whatever
// reason, before it does.
if cmd.Cmd.SysProcAttr == nil {
cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
ExecRunnable(cmd, nil)
}
@ -501,11 +526,11 @@ func ExecRunnable(cmd Runnable, cleanup func()) {
if exitError.ProcessState.Exited() {
if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
if waitStatus.Exited() {
logrus.Errorf("%v", exitError)
logrus.Debugf("%v", exitError)
exit(waitStatus.ExitStatus())
}
if waitStatus.Signaled() {
logrus.Errorf("%v", exitError)
logrus.Debugf("%v", exitError)
exit(int(waitStatus.Signal()) + 128)
}
}
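The hunk above combines two complementary mechanisms: a goroutine that relays SIGHUP, SIGINT, and SIGTERM to the child while it runs, and Pdeathsig as a kernel-level guarantee that the child is killed if the parent dies first. A standalone sketch of the same pattern, assuming only the standard library (Linux-only, since Pdeathsig is Linux-specific):

//go:build linux

package main

import (
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

func runForwardingSignals(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	// Kernel-level safety net: the child gets SIGKILL if we exit first.
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGKILL}

	interrupted := make(chan os.Signal, 100)
	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
	defer func() {
		signal.Stop(interrupted)
		close(interrupted)
	}()

	if err := cmd.Start(); err != nil {
		return err
	}
	// Relay each received signal to the child until we stop listening.
	go func() {
		for s := range interrupted {
			_ = cmd.Process.Signal(s)
		}
	}()
	return cmd.Wait()
}

func main() {
	if err := runForwardingSignals("sleep", "2"); err != nil {
		os.Exit(1)
	}
}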

View File

@ -31,6 +31,14 @@ import (
"github.com/pkg/errors"
)
type updateNameOperation int
const (
setNames updateNameOperation = iota
addNames
removeNames
)
var (
stores []*store
storesLock sync.Mutex
@ -368,8 +376,17 @@ type Store interface {
// SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
// AddNames adds the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
AddNames(id string, names []string) error
// RemoveNames removes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
RemoveNames(id string, names []string) error
// ListImageBigData retrieves a list of the (possibly large) chunks of
// named data associated with an image.
ListImageBigData(id string) ([]string, error)
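A usage sketch for the new name operations, assuming a store opened via storage.GetStore and a hypothetical object ID:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	store, err := storage.GetStore(storage.StoreOptions{})
	if err != nil {
		panic(err)
	}
	id := "0123abcd" // hypothetical layer/image/container ID
	// Old, racy read-modify-write through the deprecated SetNames:
	//   names, _ := store.Names(id)
	//   _ = store.SetNames(id, append(names, "localhost/foo:latest"))
	// The new operations express the intent in one call each:
	if err := store.AddNames(id, []string{"localhost/foo:latest"}); err != nil {
		fmt.Println(err)
	}
	if err := store.RemoveNames(id, []string{"localhost/foo:old"}); err != nil {
		fmt.Println(err)
	}
}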
@ -2050,7 +2067,20 @@ func dedupeNames(names []string) []string {
return deduped
}
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
func (s *store) SetNames(id string, names []string) error {
return s.updateNames(id, names, setNames)
}
func (s *store) AddNames(id string, names []string) error {
return s.updateNames(id, names, addNames)
}
func (s *store) RemoveNames(id string, names []string) error {
return s.updateNames(id, names, removeNames)
}
func (s *store) updateNames(id string, names []string, op updateNameOperation) error {
deduped := dedupeNames(names)
rlstore, err := s.LayerStore()
@ -2063,7 +2093,16 @@ func (s *store) SetNames(id string, names []string) error {
return err
}
if rlstore.Exists(id) {
return rlstore.SetNames(id, deduped)
switch op {
case setNames:
return rlstore.SetNames(id, deduped)
case removeNames:
return rlstore.RemoveNames(id, deduped)
case addNames:
return rlstore.AddNames(id, deduped)
default:
return errInvalidUpdateNameOperation
}
}
ristore, err := s.ImageStore()
@ -2076,7 +2115,16 @@ func (s *store) SetNames(id string, names []string) error {
return err
}
if ristore.Exists(id) {
return ristore.SetNames(id, deduped)
switch op {
case setNames:
return ristore.SetNames(id, deduped)
case removeNames:
return ristore.RemoveNames(id, deduped)
case addNames:
return ristore.AddNames(id, deduped)
default:
return errInvalidUpdateNameOperation
}
}
// Check if id refers to a RO Store
@ -2114,7 +2162,16 @@ func (s *store) SetNames(id string, names []string) error {
return err
}
if rcstore.Exists(id) {
return rcstore.SetNames(id, deduped)
switch op {
case setNames:
return rcstore.SetNames(id, deduped)
case removeNames:
return rcstore.RemoveNames(id, deduped)
case addNames:
return rcstore.AddNames(id, deduped)
default:
return errInvalidUpdateNameOperation
}
}
return ErrLayerUnknown
}
@ -2532,17 +2589,12 @@ func (s *store) DeleteContainer(id string) error {
}()
var errors []error
for {
select {
case err, ok := <-errChan:
if !ok {
return multierror.Append(nil, errors...).ErrorOrNil()
}
if err != nil {
errors = append(errors, err)
}
for err := range errChan {
if err != nil {
errors = append(errors, err)
}
}
return multierror.Append(nil, errors...).ErrorOrNil()
}
}
return ErrNotAContainer
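The refactor above replaces the select loop with a plain range over the channel; the two are equivalent because the producer closes errChan when it is done. The same drain-into-multierror pattern in isolation, with a hypothetical job list:

package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

// runAll runs each job, collecting failures; jobs is a hypothetical work list.
func runAll(jobs []func() error) error {
	errCh := make(chan error)
	go func() {
		defer close(errCh) // closing ends the range loop below
		for _, job := range jobs {
			errCh <- job()
		}
	}()
	var errs []error
	for err := range errCh {
		if err != nil {
			errs = append(errs, err)
		}
	}
	return multierror.Append(nil, errs...).ErrorOrNil()
}

func main() {
	err := runAll([]func() error{
		func() error { return nil },
		func() error { return fmt.Errorf("boom") },
	})
	fmt.Println(err)
}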

View File

@ -55,4 +55,6 @@ var (
ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
// ErrNotSupported is returned when the requested functionality is not supported.
ErrNotSupported = errors.New("not supported")
// ErrInvalidMappings is returned when the specified mappings are invalid.
ErrInvalidMappings = errors.New("invalid mappings specified")
)

View File

@ -3,14 +3,12 @@ package types
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
"github.com/containers/storage/drivers/overlay"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
@ -19,11 +17,11 @@ import (
// TOML-friendly explicit tables used for conversions.
type TomlConfig struct {
Storage struct {
Driver string `toml:"driver"`
RunRoot string `toml:"runroot"`
GraphRoot string `toml:"graphroot"`
RootlessStoragePath string `toml:"rootless_storage_path"`
Options cfg.OptionsConfig `toml:"options"`
Driver string `toml:"driver,omitempty"`
RunRoot string `toml:"runroot,omitempty"`
GraphRoot string `toml:"graphroot,omitempty"`
RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
Options cfg.OptionsConfig `toml:"options,omitempty"`
} `toml:"storage"`
}
@ -225,25 +223,11 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
opts.GraphDriverName = overlayDriver
}
if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver {
supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime)
if err != nil {
return opts, err
}
if supported {
opts.GraphDriverName = overlayDriver
} else {
if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
opts.GraphDriverName = overlayDriver
opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
}
}
if opts.GraphDriverName == overlayDriver {
for _, o := range systemOpts.GraphDriverOptions {
if strings.Contains(o, "ignore_chown_errors") {
opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
break
}
if opts.GraphDriverName == overlayDriver {
for _, o := range systemOpts.GraphDriverOptions {
if strings.Contains(o, "ignore_chown_errors") {
opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
break
}
}
}
@ -431,11 +415,12 @@ func Save(conf TomlConfig, rootless bool) error {
if err != nil {
return err
}
if err = os.Remove(configFile); !os.IsNotExist(err) {
if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil {
return err
}
f, err := os.Open(configFile)
f, err := os.Create(configFile)
if err != nil {
return err
}
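Both changes in this hunk are bug fixes: os.Open opens the file read-only, so the config could never be written, and the old os.Remove check returned early even on success, since os.IsNotExist(nil) is false. A minimal illustration of the removal-check pitfall (hypothetical path):

package main

import (
	"fmt"
	"os"
)

func main() {
	path := "/tmp/storage.conf" // hypothetical path

	// Buggy check: when os.Remove succeeds, err is nil, os.IsNotExist(nil)
	// is false, so the condition is true and Save returned nil prematurely.
	if err := os.Remove(path); !os.IsNotExist(err) {
		fmt.Println("old code returned here even on successful removal:", err)
	}

	// Fixed check: only bail out on a real error other than "does not exist".
	if err := os.Remove(path); !os.IsNotExist(err) && err != nil {
		fmt.Println("fatal:", err)
		return
	}
	fmt.Println("new code continues on to recreate the file with os.Create")
}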

View File

@ -40,3 +40,35 @@ func validateMountOptions(mountOptions []string) error {
}
return nil
}
func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) {
result := make([]string, 0)
switch op {
case setNames:
// ignore all old names and just return new names
return dedupeNames(opParameters), nil
case removeNames:
// remove given names from old names
for _, name := range oldNames {
// only keep names in final result which do not intersect with input names
// basically `result = oldNames - opParameters`
nameShouldBeRemoved := false
for _, opName := range opParameters {
if name == opName {
nameShouldBeRemoved = true
break
}
}
if !nameShouldBeRemoved {
result = append(result, name)
}
}
return dedupeNames(result), nil
case addNames:
result = append(result, opParameters...)
result = append(result, oldNames...)
return dedupeNames(result), nil
default:
return result, errInvalidUpdateNameOperation
}
}
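Spelled out, the three operations behave as follows; a test-style sketch (this would live inside the package, since applyNameOperation is unexported, and the expected order assumes dedupeNames keeps first occurrences):

package storage

import (
	"reflect"
	"testing"
)

func TestApplyNameOperation(t *testing.T) {
	old := []string{"a", "b", "c"}

	set, _ := applyNameOperation(old, []string{"x", "a"}, setNames)
	rem, _ := applyNameOperation(old, []string{"b"}, removeNames)
	add, _ := applyNameOperation(old, []string{"d", "a"}, addNames)

	if !reflect.DeepEqual(set, []string{"x", "a"}) { // old names ignored entirely
		t.Errorf("setNames: %v", set)
	}
	if !reflect.DeepEqual(rem, []string{"a", "c"}) { // result = oldNames - opParameters
		t.Errorf("removeNames: %v", rem)
	}
	if !reflect.DeepEqual(add, []string{"d", "a", "b", "c"}) { // union, params first
		t.Errorf("addNames: %v", add)
	}
}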

View File

@ -17,6 +17,23 @@ This package provides various compression algorithms.
# changelog
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
<details>
<summary>See Details</summary>
Both compression and decompression now support "synchronous" stream operations: whenever "concurrency" is set to 1, they operate without spawning goroutines (a short sketch follows this changelog excerpt).
Asynchronous stream decompression is also faster, since goroutine allocation now splits the workload much more effectively; typical streams will fully use two cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
While the release has been extensively tested, it is recommended to test when upgrading.
</details>
* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
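A minimal sketch of the synchronous mode described above, assuming the zstd package's WithEncoderConcurrency option (WithDecoderConcurrency works the same way for reading):

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1 selects the goroutine-free synchronous path; such
	// writers can be pooled and garbage collected with nothing left over.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
}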

View File

@ -0,0 +1,5 @@
package huff0
//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s

View File

@ -165,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
return uint16(b.value >> n)
}
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63

View File

@ -725,189 +725,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
return dst, br.close()
}
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
{
const stream = 0
const stream2 = 1
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
{
const stream = 2
const stream2 = 3
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
off += 2
if off == 0 {
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must be at least 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of

View File

@ -0,0 +1,488 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].off < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -0,0 +1,197 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].off < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -0,0 +1,181 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop.
package huff0
import (
"errors"
"fmt"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// fallback8BitSize is the output size below which the Go version is faster.
const fallback8BitSize = 800
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
use8BitTables := d.actualTableLog <= 8
if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
const debug = false
// see: bitReaderShifted.peekBitsFast()
peekBits := uint8((64 - d.actualTableLog) & 63)
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
} else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
}
if off != 0 {
break
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must be at least 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
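For context, a round-trip sketch of how this decoder is driven, assuming huff0's exported Compress4X, ReadTable, and Scratch.Decoder API (the serialized table precedes the four streams in the compressed output):

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func roundTrip(data []byte) ([]byte, error) {
	var s huff0.Scratch
	// Compress4X may instead return ErrIncompressible or ErrUseRLE;
	// real callers store the input differently in those cases.
	comp, _, err := huff0.Compress4X(data, &s)
	if err != nil {
		return nil, err
	}
	// ReadTable consumes the serialized table and returns the streams.
	s2, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		return nil, err
	}
	// As documented above: dst capacity must equal the uncompressed size.
	return s2.Decoder().Decompress4X(make([]byte, 0, len(data)), remain)
}

func main() {
	out, err := roundTrip(bytes.Repeat([]byte("abcd"), 64))
	fmt.Println(len(out), err)
}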

View File

@ -0,0 +1,506 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].off < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -0,0 +1,195 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].off < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -0,0 +1,193 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// This file contains a generic implementation of Decoder.Decompress4X.
package huff0
import (
"errors"
"fmt"
)
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
{
const stream = 0
const stream2 = 1
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
{
const stream = 2
const stream2 = 3
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
off += 2
if off == 0 {
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must be at least 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
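The `off += 2` / `if off == 0` pair above is a deliberate uint8 overflow: the counter wraps back to zero exactly when 256 values have been buffered, which doubles as the flush condition without an extra comparison. A minimal standalone sketch of the same trick (illustrative only, not the library's code):
```go
package main

import "fmt"

func main() {
	// off is deliberately uint8: adding 2 per iteration wraps it back to 0
	// after 128 iterations, i.e. exactly when 256 entries are buffered.
	var off uint8
	flushes := 0
	for i := 0; i < 1024; i++ {
		off += 2
		if off == 0 { // wrapped: a full 256-entry buffer is ready to flush
			flushes++
		}
	}
	fmt.Println("flushes:", flushes) // 8 (1024 / 128)
}
```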

View File

@ -153,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package:
file out level insize outsize millis mb/s
silesia.tar zskp 1 211947520 73101992 643 313.87
silesia.tar zskp 2 211947520 67504318 969 208.38
silesia.tar zskp 3 211947520 64595893 2007 100.68
silesia.tar zskp 4 211947520 60995370 8825 22.90
silesia.tar zskp 1 211947520 73821326 634 318.47
silesia.tar zskp 2 211947520 67655404 1508 133.96
silesia.tar zskp 3 211947520 64746933 3000 67.37
silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@ -165,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
silesia.tar gzstd 1 211947520 80007735 1654 122.21
silesia.tar gzkp 1 211947520 80136201 1152 175.45
silesia.tar gzstd 1 211947520 80007735 1498 134.87
silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s
gob-stream zskp 1 1911399616 235022249 3088 590.30
gob-stream zskp 2 1911399616 205669791 3786 481.34
gob-stream zskp 3 1911399616 175034659 9636 189.17
gob-stream zskp 4 1911399616 165609838 50369 36.19
gob-stream zskp 1 1911399616 233948096 3230 564.34
gob-stream zskp 2 1911399616 203997694 4997 364.73
gob-stream zskp 3 1911399616 173526523 13435 135.68
gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
gob-stream gzstd 1 1911399616 357382641 10251 177.82
gob-stream gzkp 1 1911399616 359753026 5438 335.20
gob-stream gzstd 1 1911399616 357382013 9046 201.49
gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s
enwik9 zskp 1 1000000000 343848582 3609 264.18
enwik9 zskp 2 1000000000 317276632 5746 165.97
enwik9 zskp 3 1000000000 292243069 12162 78.41
enwik9 zskp 4 1000000000 262183768 82837 11.51
enwik9 zskp 1 1000000000 343833605 3687 258.64
enwik9 zskp 2 1000000000 317001237 7672 124.29
enwik9 zskp 3 1000000000 291915823 15923 59.89
enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
enwik9 gzstd 1 1000000000 382578136 9604 99.30
enwik9 gzkp 1 1000000000 383825945 6544 145.73
enwik9 gzstd 1 1000000000 382578136 8608 110.78
enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s
github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM image, Linux Mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s
rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s
nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
```
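For readers reproducing these numbers: the zskp rows are this package at its four encoder levels. A minimal sketch of exercising those levels (assuming the usual mapping of zskp 1-4 to the named levels below):
```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// The four named levels assumed to correspond to zskp 1-4 above.
	levels := []zstd.EncoderLevel{
		zstd.SpeedFastest,
		zstd.SpeedDefault,
		zstd.SpeedBetterCompression,
		zstd.SpeedBestCompression,
	}
	payload := bytes.Repeat([]byte("benchmark-style sample data "), 1000)
	for _, lvl := range levels {
		enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(lvl))
		if err != nil {
			panic(err)
		}
		out := enc.EncodeAll(payload, nil) // one-shot compress; no writer needed
		enc.Close()
		fmt.Printf("%-20v in=%d out=%d\n", lvl, len(payload), len(out))
	}
}
```
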
## Decompressor

View File

@ -167,6 +167,11 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return ErrCompressedSizeTooBig
}
// Empty compressed blocks must be at least 2 bytes:
// one for Literals_Block_Type and one for Sequences_Section_Header.
if cSize < 2 {
return ErrBlockTooSmall
}
case blockTypeRaw:
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
if debugDecoder {
@ -491,6 +496,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if debugDecoder {
printf("prepareSequences: %d byte(s) input\n", len(in))
}
// Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 {
@ -499,8 +507,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
var nSeqs int
seqHeader := in[0]
switch {
case seqHeader == 0:
in = in[1:]
case seqHeader < 128:
nSeqs = int(seqHeader)
in = in[1:]
@ -517,6 +523,13 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:]
}
if nSeqs == 0 && len(in) != 0 {
// With no sequences, there should not be any more data in the block.
if debugDecoder {
printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
}
return ErrUnexpectedBlockSize
}
var seqs = &hist.decoders
seqs.nSeqs = nSeqs
@ -635,6 +648,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
hist.decoders.seqSize = len(hist.decoders.literals)
return nil
}
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
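The seqHeader switch this hunk tightens follows the format's Sequences_Section_Header encoding. A spec-derived sketch of the full header parse (standalone, not the library's exact code; the two-byte branch is reconstructed from the spec since the diff elides it):
```go
package main

import (
	"errors"
	"fmt"
)

// parseNumSequences decodes the sequence count per the zstd spec:
// a first byte below 128 is the count itself, 128..254 spans two
// bytes, and 255 spans three.
func parseNumSequences(in []byte) (nSeqs int, rest []byte, err error) {
	if len(in) < 1 {
		return 0, nil, errors.New("sequences: empty input")
	}
	switch b0 := in[0]; {
	case b0 < 128:
		return int(b0), in[1:], nil
	case b0 < 255:
		if len(in) < 2 {
			return 0, nil, errors.New("sequences: truncated 2-byte header")
		}
		return (int(b0)-128)<<8 + int(in[1]), in[2:], nil
	default: // b0 == 255
		if len(in) < 3 {
			return 0, nil, errors.New("sequences: truncated 3-byte header")
		}
		return 0x7f00 + int(in[1]) + int(in[2])<<8, in[3:], nil
	}
}

func main() {
	n, rest, _ := parseNumSequences([]byte{0x03, 0xaa})
	fmt.Println(n, len(rest)) // 3 1
}
```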

View File

@ -348,10 +348,10 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.setDict(&dict)
}
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
}
if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
@ -514,7 +514,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Check frame size (before CRC)
d.syncStream.decodedFrame += uint64(len(d.current.b))
if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize {
if d.syncStream.decodedFrame > d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
@ -523,7 +523,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
}
// Check FCS
if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize {
if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
@ -700,6 +700,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
hist.windowSize = block.async.newHist.windowSize
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
@ -811,11 +812,11 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if !hasErr {
decodedFrame += uint64(len(do.b))
if fcs > 0 && decodedFrame > fcs {
if decodedFrame > fcs {
println("fcs exceeded", block.Last, fcs, decodedFrame)
do.err = ErrFrameSizeExceeded
hasErr = true
} else if block.Last && fcs > 0 && decodedFrame != fcs {
} else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
do.err = ErrFrameSizeMismatch
hasErr = true
} else {
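The `fcs > 0` guards relaxed throughout this file follow from the sentinel change: an unknown frame content size is now `fcsUnknown` (`math.MaxUint64`) rather than 0, so an overrun check can never fire spuriously and only the exact-size check needs an explicit sentinel test. A condensed sketch of the resulting logic (illustrative, not the library's code):
```go
package main

import (
	"fmt"
	"math"
)

const fcsUnknown = math.MaxUint64 // sentinel for "frame content size not declared"

// frameSizeOK mirrors the checks above: overrun is always detectable
// (an unknown size of MaxUint64 can never be exceeded), while the
// exact-size check on the last block only applies when a size was declared.
func frameSizeOK(decoded, fcs uint64, last bool) error {
	if decoded > fcs {
		return fmt.Errorf("frame size exceeded: %d > %d", decoded, fcs)
	}
	if last && fcs != fcsUnknown && decoded != fcs {
		return fmt.Errorf("frame size mismatch: %d != %d", decoded, fcs)
	}
	return nil
}

func main() {
	fmt.Println(frameSizeOK(100, fcsUnknown, true)) // <nil>: size unknown
	fmt.Println(frameSizeOK(90, 100, true))         // mismatch
}
```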

View File

@ -197,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default:
fcsSize = 1 << v
}
d.FrameContentSize = 0
d.FrameContentSize = fcsUnknown
if fcsSize > 0 {
b, err := br.readSmall(fcsSize)
if err != nil {
@ -343,12 +343,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
err = ErrDecoderSizeExceeded
break
}
if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
err = ErrFrameSizeExceeded
break
}
if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded
break
@ -356,13 +351,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if dec.Last {
break
}
if debugDecoder && d.FrameContentSize > 0 {
if debugDecoder {
println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
}
}
dst = d.history.b
if err == nil {
if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
var n int
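The `fcsSize = 1 << v` branch above implements the format's mapping from the 2-bit FCS flag to a field width; flag 0 means the field is absent unless the single-segment flag forces a one-byte size, which is exactly why the default must now be `fcsUnknown`. A spec-derived sketch (not the library's exact code):
```go
package main

import "fmt"

// fcsFieldSize maps the frame descriptor's 2-bit Frame_Content_Size flag
// to the field width in bytes, per the zstd format spec.
func fcsFieldSize(flag uint8, singleSegment bool) int {
	if flag == 0 {
		if singleSegment {
			return 1 // single-segment frames always carry a 1-byte size
		}
		return 0 // no field: content size is unknown
	}
	return 1 << flag // flags 1, 2, 3 -> 2, 4, 8 bytes
}

func main() {
	fmt.Println(fcsFieldSize(0, false)) // 0
	fmt.Println(fcsFieldSize(0, true))  // 1
	fmt.Println(fcsFieldSize(3, false)) // 8
}
```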

View File

@ -1,5 +1,5 @@
//go:build gofuzz
// +build gofuzz
//go:build ignorecrc
// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.

View File

@ -1,5 +1,5 @@
//go:build !gofuzz
// +build !gofuzz
//go:build !ignorecrc
// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
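These two one-line tag swaps are the paired build-constraint pattern: two files declare the same constant under opposite tags, so `go build -tags ignorecrc` flips the behavior at compile time. A minimal sketch of the pattern, with hypothetical file and constant names:
```go
// file: crc_skip.go (hypothetical name) — only built with -tags ignorecrc.
//go:build ignorecrc

package zstd

// skipCRC (hypothetical name) disables checksum verification when the
// ignorecrc tag is set; a sibling file under //go:build !ignorecrc
// declares skipCRC = false for the default build.
const skipCRC = true
```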

View File

@ -107,7 +107,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
@ -192,7 +195,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
@ -230,7 +233,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
@ -347,6 +350,10 @@ func (s *sequenceDecs) decodeSync(history *history) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
@ -426,7 +433,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", size)
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
if size > cap(out) {
// Not enough capacity, which can happen under high-volume block streaming conditions.
@ -535,6 +542,11 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
}
// Check that there is space for the literals.
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
}
// Add final literals
s.out = append(out, s.literals...)
return br.close()
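The clamp introduced above caps a block's decoded size at the smaller of the format's hard 128 KiB block limit and the frame's window size, so small-window streams can no longer claim oversized blocks. A one-function sketch of the same clamp (illustrative only):
```go
package main

import "fmt"

// effectiveMaxBlockSize mirrors the clamp above: a block may not decode
// to more than min(128 KiB, window size), per the zstd format.
func effectiveMaxBlockSize(windowSize int) int {
	const maxCompressedBlockSize = 128 << 10 // the format's hard block limit
	if windowSize < maxCompressedBlockSize {
		return windowSize
	}
	return maxCompressedBlockSize
}

func main() {
	fmt.Println(effectiveMaxBlockSize(1 << 10)) // 1024: small window wins
	fmt.Println(effectiveMaxBlockSize(8 << 20)) // 131072: hard limit wins
}
```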

View File

@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
// newZipReader cannot be used since we would leak goroutines...
// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
if ok {
@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.dec == nil {
return 0, errors.New("Read after Close")
return 0, errors.New("read after close or EOF")
}
dec, err := r.dec.Read(p)
if err == io.EOF {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.dec = nil
}
return dec, err
}
@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
return func(r io.Reader) io.ReadCloser {
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
if err != nil {
panic(err)
}
return d.IOReadCloser()
}
return newZipReader
}
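A sketch of how the pooled decompressor above is consumed: the returned constructor is registered with archive/zip for the zstd method ID, and with this change the Decoder goes back to the pool on EOF instead of only on Close ("archive.zip" is a hypothetical input file):
```go
package main

import (
	"archive/zip"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	zr, err := zip.OpenReader("archive.zip") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	// Route zstd-compressed entries through the pooled decompressor.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	for _, f := range zr.File {
		rc, err := f.Open()
		if err != nil {
			panic(err)
		}
		if _, err := io.Copy(os.Stdout, rc); err != nil {
			panic(err)
		}
		rc.Close()
	}
}
```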

View File

@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
// fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64
var (
// ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input.
@ -52,6 +55,10 @@ var (
// Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small")
// ErrUnexpectedBlockSize is returned when a block has an unexpected size.
// Typically returned on invalid input.
ErrUnexpectedBlockSize = errors.New("unexpected block size")
// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
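The new sentinel is a plain `errors.New` value like its siblings, so callers can branch on it with `errors.Is`. A minimal sketch (the input bytes are just an arbitrary invalid stream):
```go
package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil) // nil reader is fine for DecodeAll-only use
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := dec.DecodeAll([]byte("not a zstd frame"), nil); err != nil {
		if errors.Is(err, zstd.ErrUnexpectedBlockSize) {
			fmt.Println("block carried trailing bytes after a 0-sequence header")
		} else {
			fmt.Println("decode failed:", err)
		}
	}
}
```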

9
vendor/modules.txt vendored
View File

@ -2,7 +2,6 @@
github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
# github.com/Microsoft/go-winio v0.5.2
## explicit
github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/backuptar
github.com/Microsoft/go-winio/pkg/guid
@ -44,7 +43,7 @@ github.com/containerd/cgroups/stats/v1
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/platforms
# github.com/containerd/stargz-snapshotter/estargz v0.11.0
# github.com/containerd/stargz-snapshotter/estargz v0.11.3
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containers/common v0.47.4
@ -126,7 +125,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7
github.com/containers/ocicrypt/spec
github.com/containers/ocicrypt/utils
github.com/containers/ocicrypt/utils/keyprovider
# github.com/containers/storage v1.38.2
# github.com/containers/storage v1.39.0
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@ -252,8 +251,7 @@ github.com/imdario/mergo
github.com/inconshreveable/mousetrap
# github.com/json-iterator/go v1.1.12
github.com/json-iterator/go
# github.com/klauspost/compress v1.15.0
## explicit
# github.com/klauspost/compress v1.15.1
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@ -278,7 +276,6 @@ github.com/miekg/pkcs11
# github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/mistifyio/go-zfs
# github.com/moby/sys/mountinfo v0.6.0
## explicit
github.com/moby/sys/mountinfo
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/concurrent