fix(deps): update module github.com/containers/storage to v1.45.0

Signed-off-by: Renovate Bot <bot@renovateapp.com>
This commit is contained in:
renovate[bot] 2023-01-13 01:15:34 +00:00 committed by GitHub
parent 28175104d7
commit 1133a2a395
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
118 changed files with 3801 additions and 1741 deletions

10
go.mod
View File

@ -6,7 +6,7 @@ require (
github.com/containers/common v0.50.1 github.com/containers/common v0.50.1
github.com/containers/image/v5 v5.23.1-0.20221019175208-1dd254487708 github.com/containers/image/v5 v5.23.1-0.20221019175208-1dd254487708
github.com/containers/ocicrypt v1.1.6 github.com/containers/ocicrypt v1.1.6
github.com/containers/storage v1.44.0 github.com/containers/storage v1.45.0
github.com/docker/distribution v2.8.1+incompatible github.com/docker/distribution v2.8.1+incompatible
github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0-rc2 github.com/opencontainers/image-spec v1.1.0-rc2
@ -24,11 +24,11 @@ require (
require ( require (
github.com/BurntSushi/toml v1.2.1 // indirect github.com/BurntSushi/toml v1.2.1 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.5 // indirect github.com/Microsoft/hcsshim v0.9.6 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/containerd/cgroups v1.0.4 // indirect github.com/containerd/cgroups v1.0.4 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.12.1 // indirect github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
@ -50,7 +50,7 @@ require (
github.com/imdario/mergo v0.3.13 // indirect github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.12 // indirect github.com/klauspost/compress v1.15.14 // indirect
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
github.com/kr/pretty v0.3.0 // indirect github.com/kr/pretty v0.3.0 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
@ -78,7 +78,7 @@ require (
github.com/tchap/go-patricia v2.3.0+incompatible // indirect github.com/tchap/go-patricia v2.3.0+incompatible // indirect
github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect github.com/ulikunitz/xz v0.5.11 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect github.com/vbatts/tar-split v0.11.2 // indirect
github.com/vbauerster/mpb/v7 v7.5.3 // indirect github.com/vbauerster/mpb/v7 v7.5.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect

18
go.sum
View File

@ -209,8 +209,8 @@ github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.9.5 h1:AbV+VPfTrIVffukazHcpxmz/sRiE6YaMDzHWR9BXZHo= github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
github.com/Microsoft/hcsshim v0.9.5/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@ -454,8 +454,8 @@ github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oM
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M= github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
github.com/containerd/stargz-snapshotter/estargz v0.12.1 h1:+7nYmHJb0tEkcRaAW+MHqoKaJYZmkikupxCqVtmPuY0= github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw= github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@ -497,8 +497,8 @@ github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhI
github.com/containers/ocicrypt v1.1.6 h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI= github.com/containers/ocicrypt v1.1.6 h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI=
github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc= github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s= github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
github.com/containers/storage v1.44.0 h1:xCFhwILjjU+Hg133d07EVCgaaXn0ileGcVsAUcV8tDY= github.com/containers/storage v1.45.0 h1:UoCtlZf1eYi9zPl7XfhdCrifIpQABogomH1Gh0lsU70=
github.com/containers/storage v1.44.0/go.mod h1:HSfx7vUXwKPatPMqhgMw3mI3c3ijIJPZV5O0sj/mVxI= github.com/containers/storage v1.45.0/go.mod h1:OdRUYHrq1HP6iAo79VxqtYuJzC5j4eA2I60jKOoCT7g=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@ -1172,8 +1172,9 @@ github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHU
github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc=
github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0= github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0=
@ -1733,8 +1734,9 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=

View File

@ -86,6 +86,12 @@ type Container interface {
// container to be terminated by some error condition (including calling // container to be terminated by some error condition (including calling
// Close). // Close).
Wait() error Wait() error
// WaitChannel returns the wait channel of the container
WaitChannel() <-chan struct{}
// WaitError returns the container termination error.
// This function should only be called after the channel in WaitChannel()
// is closed. Otherwise it is not thread safe.
WaitError() error
// Modify sends a request to modify container resources // Modify sends a request to modify container resources
Modify(ctx context.Context, config interface{}) error Modify(ctx context.Context, config interface{}) error
} }

View File

@ -287,11 +287,19 @@ func (computeSystem *System) waitBackground() {
oc.SetSpanStatus(span, err) oc.SetSpanStatus(span, err)
} }
func (computeSystem *System) WaitChannel() <-chan struct{} {
return computeSystem.waitBlock
}
func (computeSystem *System) WaitError() error {
return computeSystem.waitError
}
// Wait synchronously waits for the compute system to shutdown or terminate. If // Wait synchronously waits for the compute system to shutdown or terminate. If
// the compute system has already exited returns the previous error (if any). // the compute system has already exited returns the previous error (if any).
func (computeSystem *System) Wait() error { func (computeSystem *System) Wait() error {
<-computeSystem.waitBlock <-computeSystem.WaitChannel()
return computeSystem.waitError return computeSystem.WaitError()
} }
// ExitError returns an error describing the reason the compute system terminated. // ExitError returns an error describing the reason the compute system terminated.

View File

@ -49,6 +49,7 @@ type options struct {
missedPrioritizedFiles *[]string missedPrioritizedFiles *[]string
compression Compression compression Compression
ctx context.Context ctx context.Context
minChunkSize int
} }
type Option func(o *options) error type Option func(o *options) error
@ -63,6 +64,7 @@ func WithChunkSize(chunkSize int) Option {
// WithCompressionLevel option specifies the gzip compression level. // WithCompressionLevel option specifies the gzip compression level.
// The default is gzip.BestCompression. // The default is gzip.BestCompression.
// This option will be ignored if WithCompression option is used.
// See also: https://godoc.org/compress/gzip#pkg-constants // See also: https://godoc.org/compress/gzip#pkg-constants
func WithCompressionLevel(level int) Option { func WithCompressionLevel(level int) Option {
return func(o *options) error { return func(o *options) error {
@ -113,6 +115,18 @@ func WithContext(ctx context.Context) Option {
} }
} }
// WithMinChunkSize option specifies the minimal number of bytes of data
// must be written in one gzip stream.
// By increasing this number, one gzip stream can contain multiple files
// and it hopefully leads to smaller result blob.
// NOTE: This adds a TOC property that old reader doesn't understand.
func WithMinChunkSize(minChunkSize int) Option {
return func(o *options) error {
o.minChunkSize = minChunkSize
return nil
}
}
// Blob is an eStargz blob. // Blob is an eStargz blob.
type Blob struct { type Blob struct {
io.ReadCloser io.ReadCloser
@ -180,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
tarParts := divideEntries(entries, runtime.GOMAXPROCS(0)) var tarParts [][]*entry
if opts.minChunkSize > 0 {
// Each entry needs to know the size of the current gzip stream so they
// cannot be processed in parallel.
tarParts = [][]*entry{entries}
} else {
tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
}
writers := make([]*Writer, len(tarParts)) writers := make([]*Writer, len(tarParts))
payloads := make([]*os.File, len(tarParts)) payloads := make([]*os.File, len(tarParts))
var mu sync.Mutex var mu sync.Mutex
@ -195,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
} }
sw := NewWriterWithCompressor(esgzFile, opts.compression) sw := NewWriterWithCompressor(esgzFile, opts.compression)
sw.ChunkSize = opts.chunkSize sw.ChunkSize = opts.chunkSize
sw.MinChunkSize = opts.minChunkSize
if sw.needsOpenGzEntries == nil {
sw.needsOpenGzEntries = make(map[string]struct{})
}
for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
sw.needsOpenGzEntries[f] = struct{}{}
}
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
return err return err
} }
@ -209,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
rErr = err rErr = err
return nil, err return nil, err
} }
tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) tocAndFooter, tocDgst, err := closeWithCombine(writers...)
if err != nil { if err != nil {
rErr = err rErr = err
return nil, err return nil, err
@ -252,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
// Writers doesn't write TOC and footer to the underlying writers so they can be // Writers doesn't write TOC and footer to the underlying writers so they can be
// combined into a single eStargz and tocAndFooter returned by this function can // combined into a single eStargz and tocAndFooter returned by this function can
// be appended at the tail of that combined blob. // be appended at the tail of that combined blob.
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
if len(ws) == 0 { if len(ws) == 0 {
return nil, "", fmt.Errorf("at least one writer must be passed") return nil, "", fmt.Errorf("at least one writer must be passed")
} }
@ -395,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader {
func importTar(in io.ReaderAt) (*tarFile, error) { func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{} tf := &tarFile{}
pw, err := newCountReader(in) pw, err := newCountReadSeeker(in)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to make position watcher: %w", err) return nil, fmt.Errorf("failed to make position watcher: %w", err)
} }
@ -571,19 +599,19 @@ func (tf *tempFiles) cleanupAll() error {
return errorutil.Aggregate(allErr) return errorutil.Aggregate(allErr)
} }
func newCountReader(r io.ReaderAt) (*countReader, error) { func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
pos := int64(0) pos := int64(0)
return &countReader{r: r, cPos: &pos}, nil return &countReadSeeker{r: r, cPos: &pos}, nil
} }
type countReader struct { type countReadSeeker struct {
r io.ReaderAt r io.ReaderAt
cPos *int64 cPos *int64
mu sync.Mutex mu sync.Mutex
} }
func (cr *countReader) Read(p []byte) (int, error) { func (cr *countReadSeeker) Read(p []byte) (int, error) {
cr.mu.Lock() cr.mu.Lock()
defer cr.mu.Unlock() defer cr.mu.Unlock()
@ -594,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) {
return n, err return n, err
} }
func (cr *countReader) Seek(offset int64, whence int) (int64, error) { func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
cr.mu.Lock() cr.mu.Lock()
defer cr.mu.Unlock() defer cr.mu.Unlock()
@ -615,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
return offset, nil return offset, nil
} }
func (cr *countReader) currentPos() int64 { func (cr *countReadSeeker) currentPos() int64 {
cr.mu.Lock() cr.mu.Lock()
defer cr.mu.Unlock() defer cr.mu.Unlock()

View File

@ -150,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
allErr = append(allErr, err) allErr = append(allErr, err)
continue continue
} }
if tocSize <= 0 { if tocOffset >= 0 && tocSize <= 0 {
tocSize = sr.Size() - tocOffset - fSize tocSize = sr.Size() - tocOffset - fSize
} }
if tocSize < int64(len(maybeTocBytes)) { if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
maybeTocBytes = maybeTocBytes[:tocSize] maybeTocBytes = maybeTocBytes[:tocSize]
} }
r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
@ -207,8 +207,16 @@ func (r *Reader) initFields() error {
uname := map[int]string{} uname := map[int]string{}
gname := map[int]string{} gname := map[int]string{}
var lastRegEnt *TOCEntry var lastRegEnt *TOCEntry
for _, ent := range r.toc.Entries { var chunkTopIndex int
for i, ent := range r.toc.Entries {
ent.Name = cleanEntryName(ent.Name) ent.Name = cleanEntryName(ent.Name)
switch ent.Type {
case "reg", "chunk":
if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
chunkTopIndex = i
}
ent.chunkTopIndex = chunkTopIndex
}
if ent.Type == "reg" { if ent.Type == "reg" {
lastRegEnt = ent lastRegEnt = ent
} }
@ -294,7 +302,7 @@ func (r *Reader) initFields() error {
if e.isDataType() { if e.isDataType() {
e.nextOffset = lastOffset e.nextOffset = lastOffset
} }
if e.Offset != 0 { if e.Offset != 0 && e.InnerOffset == 0 {
lastOffset = e.Offset lastOffset = e.Offset
} }
} }
@ -488,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
// //
// Name must be absolute path or one that is relative to root. // Name must be absolute path or one that is relative to root.
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
fr, err := r.newFileReader(name)
if err != nil {
return nil, err
}
return io.NewSectionReader(fr, 0, fr.size), nil
}
func (r *Reader) newFileReader(name string) (*fileReader, error) {
name = cleanEntryName(name) name = cleanEntryName(name)
ent, ok := r.Lookup(name) ent, ok := r.Lookup(name)
if !ok { if !ok {
@ -505,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
Err: errors.New("not a regular file"), Err: errors.New("not a regular file"),
} }
} }
fr := &fileReader{ return &fileReader{
r: r, r: r,
size: ent.Size, size: ent.Size,
ents: r.getChunks(ent), ents: r.getChunks(ent),
}, nil
}
func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
fr, err := r.newFileReader(name)
if err != nil {
return nil, err
} }
fr.preRead = preRead
return io.NewSectionReader(fr, 0, fr.size), nil return io.NewSectionReader(fr, 0, fr.size), nil
} }
@ -521,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
} }
type fileReader struct { type fileReader struct {
r *Reader r *Reader
size int64 size int64
ents []*TOCEntry // 1 or more reg/chunk entries ents []*TOCEntry // 1 or more reg/chunk entries
preRead func(*TOCEntry, io.Reader) error
} }
func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
@ -578,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
} }
defer dr.Close() defer dr.Close()
if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) if fr.preRead == nil {
if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
}
return io.ReadFull(dr, p)
} }
return io.ReadFull(dr, p)
var retN int
var retErr error
var found bool
var nr int64
for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
if !e.isDataType() {
continue
}
if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
break
}
if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
}
nr = e.InnerOffset
if e == ent {
found = true
if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
}
retN, retErr = io.ReadFull(dr, p)
nr += off + int64(retN)
continue
}
cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
if err := fr.preRead(e, cr); err != nil {
return 0, fmt.Errorf("failed to pre read: %w", err)
}
nr += cr.n
}
if !found {
return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
}
return retN, retErr
} }
// A Writer writes stargz files. // A Writer writes stargz files.
@ -599,11 +662,20 @@ type Writer struct {
lastGroupname map[int]string lastGroupname map[int]string
compressor Compressor compressor Compressor
uncompressedCounter *countWriteFlusher
// ChunkSize optionally controls the maximum number of bytes // ChunkSize optionally controls the maximum number of bytes
// of data of a regular file that can be written in one gzip // of data of a regular file that can be written in one gzip
// stream before a new gzip stream is started. // stream before a new gzip stream is started.
// Zero means to use a default, currently 4 MiB. // Zero means to use a default, currently 4 MiB.
ChunkSize int ChunkSize int
// MinChunkSize optionally controls the minimum number of bytes
// of data must be written in one gzip stream before a new gzip
// NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
MinChunkSize int
needsOpenGzEntries map[string]struct{}
} }
// currentCompressionWriter writes to the current w.gz field, which can // currentCompressionWriter writes to the current w.gz field, which can
@ -646,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse footer: %w", err) return nil, fmt.Errorf("failed to parse footer: %w", err)
} }
if blobPayloadSize < 0 {
blobPayloadSize = sr.Size()
}
return c.Reader(io.LimitReader(sr, blobPayloadSize)) return c.Reader(io.LimitReader(sr, blobPayloadSize))
} }
@ -672,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
bw := bufio.NewWriter(w) bw := bufio.NewWriter(w)
cw := &countWriter{w: bw} cw := &countWriter{w: bw}
return &Writer{ return &Writer{
bw: bw, bw: bw,
cw: cw, cw: cw,
toc: &JTOC{Version: 1}, toc: &JTOC{Version: 1},
diffHash: sha256.New(), diffHash: sha256.New(),
compressor: c, compressor: c,
uncompressedCounter: &countWriteFlusher{},
} }
} }
@ -717,6 +793,20 @@ func (w *Writer) closeGz() error {
return nil return nil
} }
func (w *Writer) flushGz() error {
if w.closed {
return errors.New("flush on closed Writer")
}
if w.gz != nil {
if f, ok := w.gz.(interface {
Flush() error
}); ok {
return f.Flush()
}
}
return nil
}
// nameIfChanged returns name, unless it was the already the value of (*mp)[id], // nameIfChanged returns name, unless it was the already the value of (*mp)[id],
// in which case it returns the empty string. // in which case it returns the empty string.
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
@ -736,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
func (w *Writer) condOpenGz() (err error) { func (w *Writer) condOpenGz() (err error) {
if w.gz == nil { if w.gz == nil {
w.gz, err = w.compressor.Writer(w.cw) w.gz, err = w.compressor.Writer(w.cw)
if w.gz != nil {
w.gz = w.uncompressedCounter.register(w.gz)
}
} }
return return
} }
@ -784,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
if lossless { if lossless {
tr.RawAccounting = true tr.RawAccounting = true
} }
prevOffset := w.cw.n
var prevOffsetUncompressed int64
for { for {
h, err := tr.Next() h, err := tr.Next()
if err == io.EOF { if err == io.EOF {
@ -883,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
totalSize := ent.Size // save it before we destroy ent totalSize := ent.Size // save it before we destroy ent
tee := io.TeeReader(tr, payloadDigest.Hash()) tee := io.TeeReader(tr, payloadDigest.Hash())
for written < totalSize { for written < totalSize {
if err := w.closeGz(); err != nil {
return err
}
chunkSize := int64(w.chunkSize()) chunkSize := int64(w.chunkSize())
remain := totalSize - written remain := totalSize - written
if remain < chunkSize { if remain < chunkSize {
@ -894,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
} else { } else {
ent.ChunkSize = chunkSize ent.ChunkSize = chunkSize
} }
ent.Offset = w.cw.n
// We flush the underlying compression writer here to correctly calculate "w.cw.n".
if err := w.flushGz(); err != nil {
return err
}
if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
if err := w.closeGz(); err != nil {
return err
}
ent.Offset = w.cw.n
prevOffset = ent.Offset
prevOffsetUncompressed = w.uncompressedCounter.n
} else {
ent.Offset = prevOffset
ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
}
ent.ChunkOffset = written ent.ChunkOffset = written
chunkDigest := digest.Canonical.Digester() chunkDigest := digest.Canonical.Digester()
@ -940,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
return err return err
} }
func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
if ent.Type != "reg" {
return false
}
if w.needsOpenGzEntries == nil {
return false
}
_, ok := w.needsOpenGzEntries[ent.Name]
return ok
}
// DiffID returns the SHA-256 of the uncompressed tar bytes. // DiffID returns the SHA-256 of the uncompressed tar bytes.
// It is only valid to call DiffID after Close. // It is only valid to call DiffID after Close.
func (w *Writer) DiffID() string { func (w *Writer) DiffID() string {
@ -956,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
} }
func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
if tocOff < 0 {
// This means that TOC isn't contained in the blob.
// We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from
// the external location.
start := time.Now()
toc, tocDgst, err := d.ParseTOC(nil)
if err != nil {
return nil, err
}
if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
opts.telemetry.GetTocLatency(start)
}
if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
opts.telemetry.DeserializeTocLatency(start)
}
return &Reader{
sr: sr,
toc: toc,
tocDigest: tocDgst,
decompressor: d,
}, nil
}
if len(tocBytes) > 0 { if len(tocBytes) > 0 {
start := time.Now() start := time.Now()
toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
@ -1021,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) {
return return
} }
type countWriteFlusher struct {
io.WriteCloser
n int64
}
func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
wc.WriteCloser = w
return wc
}
func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
n, err = wc.WriteCloser.Write(p)
wc.n += int64(n)
return
}
func (wc *countWriteFlusher) Flush() error {
if f, ok := wc.WriteCloser.(interface {
Flush() error
}); ok {
return f.Flush()
}
return nil
}
func (wc *countWriteFlusher) Close() error {
err := wc.WriteCloser.Close()
wc.WriteCloser = nil
return err
}
// isGzip reports whether br is positioned right before an upcoming gzip stream. // isGzip reports whether br is positioned right before an upcoming gzip stream.
// It does not consume any bytes from br. // It does not consume any bytes from br.
func isGzip(br *bufio.Reader) bool { func isGzip(br *bufio.Reader) bool {
@ -1039,3 +1210,14 @@ func positive(n int64) int64 {
} }
return n return n
} }
type countReader struct {
r io.Reader
n int64
}
func (cr *countReader) Read(p []byte) (n int, err error) {
n, err = cr.r.Read(p)
cr.n += int64(n)
return
}

View File

@ -60,7 +60,7 @@ type GzipCompressor struct {
compressionLevel int compressionLevel int
} }
func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
return gzip.NewWriterLevel(w, gc.compressionLevel) return gzip.NewWriterLevel(w, gc.compressionLevel)
} }

View File

@ -31,6 +31,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math/rand"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
@ -44,21 +45,27 @@ import (
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
) )
func init() {
rand.Seed(time.Now().UnixNano())
}
// TestingController is Compression with some helper methods necessary for testing. // TestingController is Compression with some helper methods necessary for testing.
type TestingController interface { type TestingController interface {
Compression Compression
CountStreams(*testing.T, []byte) int TestStreams(t *testing.T, b []byte, streams []int64)
DiffIDOf(*testing.T, []byte) string DiffIDOf(*testing.T, []byte) string
String() string String() string
} }
// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them.
func CompressionTestSuite(t *testing.T, controllers ...TestingController) { func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
} }
type TestingControllerFactory func() TestingController
const ( const (
uncompressedType int = iota uncompressedType int = iota
gzipType gzipType
@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"}
// testBuild tests the resulting stargz blob built by this pkg has the same // testBuild tests the resulting stargz blob built by this pkg has the same
// contents as the normal stargz blob. // contents as the normal stargz blob.
func testBuild(t *testing.T, controllers ...TestingController) { func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
tests := []struct { tests := []struct {
name string name string
chunkSize int chunkSize int
in []tarEntry minChunkSize []int
in []tarEntry
}{ }{
{ {
name: "regfiles and directories", name: "regfiles and directories",
@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) {
), ),
}, },
{ {
name: "various files", name: "various files",
chunkSize: 4, chunkSize: 4,
minChunkSize: []int{0, 64000},
in: tarOf( in: tarOf(
file("baz.txt", "bazbazbazbazbazbazbaz"), file("baz.txt", "bazbazbazbazbazbazbaz"),
file("foo.txt", "a"), file("foo1.txt", "a"),
file("bar/foo2.txt", "b"),
file("foo3.txt", "c"),
symlink("barlink", "test/bar.txt"), symlink("barlink", "test/bar.txt"),
dir("test/"), dir("test/"),
dir("dev/"), dir("dev/"),
@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) {
}, },
} }
for _, tt := range tests { for _, tt := range tests {
if len(tt.minChunkSize) == 0 {
tt.minChunkSize = []int{0}
}
for _, srcCompression := range srcCompressions { for _, srcCompression := range srcCompressions {
srcCompression := srcCompression srcCompression := srcCompression
for _, cl := range controllers { for _, newCL := range controllers {
cl := cl newCL := newCL
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat srcTarFormat := srcTarFormat
for _, prefix := range allowedPrefix { for _, prefix := range allowedPrefix {
prefix := prefix prefix := prefix
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { for _, minChunkSize := range tt.minChunkSize {
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) minChunkSize := minChunkSize
// Test divideEntries() t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
entries, err := sortEntries(tarBlob, nil, nil) // identical order tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
if err != nil { // Test divideEntries()
t.Fatalf("failed to parse tar: %v", err) entries, err := sortEntries(tarBlob, nil, nil) // identical order
} if err != nil {
var merged []*entry t.Fatalf("failed to parse tar: %v", err)
for _, part := range divideEntries(entries, 4) {
merged = append(merged, part...)
}
if !reflect.DeepEqual(entries, merged) {
for _, e := range entries {
t.Logf("Original: %v", e.header)
} }
for _, e := range merged { var merged []*entry
t.Logf("Merged: %v", e.header) for _, part := range divideEntries(entries, 4) {
merged = append(merged, part...)
}
if !reflect.DeepEqual(entries, merged) {
for _, e := range entries {
t.Logf("Original: %v", e.header)
}
for _, e := range merged {
t.Logf("Merged: %v", e.header)
}
t.Errorf("divided entries couldn't be merged")
return
} }
t.Errorf("divided entries couldn't be merged")
return
}
// Prepare sample data // Prepare sample data
wantBuf := new(bytes.Buffer) cl1 := newCL()
sw := NewWriterWithCompressor(wantBuf, cl) wantBuf := new(bytes.Buffer)
sw.ChunkSize = tt.chunkSize sw := NewWriterWithCompressor(wantBuf, cl1)
if err := sw.AppendTar(tarBlob); err != nil { sw.MinChunkSize = minChunkSize
t.Fatalf("failed to append tar to want stargz: %v", err) sw.ChunkSize = tt.chunkSize
} if err := sw.AppendTar(tarBlob); err != nil {
if _, err := sw.Close(); err != nil { t.Fatalf("failed to append tar to want stargz: %v", err)
t.Fatalf("failed to prepare want stargz: %v", err) }
} if _, err := sw.Close(); err != nil {
wantData := wantBuf.Bytes() t.Fatalf("failed to prepare want stargz: %v", err)
want, err := Open(io.NewSectionReader( }
bytes.NewReader(wantData), 0, int64(len(wantData))), wantData := wantBuf.Bytes()
WithDecompressors(cl), want, err := Open(io.NewSectionReader(
) bytes.NewReader(wantData), 0, int64(len(wantData))),
if err != nil { WithDecompressors(cl1),
t.Fatalf("failed to parse the want stargz: %v", err) )
} if err != nil {
t.Fatalf("failed to parse the want stargz: %v", err)
}
// Prepare testing data // Prepare testing data
rc, err := Build(compressBlob(t, tarBlob, srcCompression), var opts []Option
WithChunkSize(tt.chunkSize), WithCompression(cl)) if minChunkSize > 0 {
if err != nil { opts = append(opts, WithMinChunkSize(minChunkSize))
t.Fatalf("failed to build stargz: %v", err) }
} cl2 := newCL()
defer rc.Close() rc, err := Build(compressBlob(t, tarBlob, srcCompression),
gotBuf := new(bytes.Buffer) append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
if _, err := io.Copy(gotBuf, rc); err != nil { if err != nil {
t.Fatalf("failed to copy built stargz blob: %v", err) t.Fatalf("failed to build stargz: %v", err)
} }
gotData := gotBuf.Bytes() defer rc.Close()
got, err := Open(io.NewSectionReader( gotBuf := new(bytes.Buffer)
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), if _, err := io.Copy(gotBuf, rc); err != nil {
WithDecompressors(cl), t.Fatalf("failed to copy built stargz blob: %v", err)
) }
if err != nil { gotData := gotBuf.Bytes()
t.Fatalf("failed to parse the got stargz: %v", err) got, err := Open(io.NewSectionReader(
} bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
WithDecompressors(cl2),
)
if err != nil {
t.Fatalf("failed to parse the got stargz: %v", err)
}
// Check DiffID is properly calculated // Check DiffID is properly calculated
rc.Close() rc.Close()
diffID := rc.DiffID() diffID := rc.DiffID()
wantDiffID := cl.DiffIDOf(t, gotData) wantDiffID := cl2.DiffIDOf(t, gotData)
if diffID.String() != wantDiffID { if diffID.String() != wantDiffID {
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
} }
// Compare as stargz // Compare as stargz
if !isSameVersion(t, cl, wantData, gotData) { if !isSameVersion(t, cl1, wantData, cl2, gotData) {
t.Errorf("built stargz hasn't same json") t.Errorf("built stargz hasn't same json")
return return
} }
if !isSameEntries(t, want, got) { if !isSameEntries(t, want, got) {
t.Errorf("built stargz isn't same as the original") t.Errorf("built stargz isn't same as the original")
return return
} }
// Compare as tar.gz // Compare as tar.gz
if !isSameTarGz(t, cl, wantData, gotData) { if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
t.Errorf("built stargz isn't same tar.gz") t.Errorf("built stargz isn't same tar.gz")
return return
} }
}) })
}
} }
} }
} }
@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) {
} }
} }
func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aGz, err := controller.Reader(bytes.NewReader(a)) aGz, err := cla.Reader(bytes.NewReader(a))
if err != nil { if err != nil {
t.Fatalf("failed to read A") t.Fatalf("failed to read A")
} }
defer aGz.Close() defer aGz.Close()
bGz, err := controller.Reader(bytes.NewReader(b)) bGz, err := clb.Reader(bytes.NewReader(b))
if err != nil { if err != nil {
t.Fatalf("failed to read B") t.Fatalf("failed to read B")
} }
@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
return true return true
} }
func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool { func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller) aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
if err != nil { if err != nil {
t.Fatalf("failed to parse A: %v", err) t.Fatalf("failed to parse A: %v", err)
} }
bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller) bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
if err != nil { if err != nil {
t.Fatalf("failed to parse B: %v", err) t.Fatalf("failed to parse B: %v", err)
} }
@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool {
a.GID == b.GID && a.GID == b.GID &&
a.Uname == b.Uname && a.Uname == b.Uname &&
a.Gname == b.Gname && a.Gname == b.Gname &&
(a.Offset > 0) == (b.Offset > 0) && (a.Offset >= 0) == (b.Offset >= 0) &&
(a.NextOffset() > 0) == (b.NextOffset() > 0) && (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
a.DevMajor == b.DevMajor && a.DevMajor == b.DevMajor &&
a.DevMinor == b.DevMinor && a.DevMinor == b.DevMinor &&
@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
const chunkSize = 3 const chunkSize = 3
// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) // type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
// testDigestAndVerify runs specified checks against sample stargz blobs. // testDigestAndVerify runs specified checks against sample stargz blobs.
func testDigestAndVerify(t *testing.T, controllers ...TestingController) { func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
tests := []struct { tests := []struct {
name string name string
tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
checks []check checks []check
minChunkSize []int
}{ }{
{ {
name: "no-regfile", name: "no-regfile",
@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
regDigest(t, "test/bar.txt", "bbb", dgstMap), regDigest(t, "test/bar.txt", "bbb", dgstMap),
) )
}, },
minChunkSize: []int{0, 64000},
checks: []check{ checks: []check{
checkStargzTOC, checkStargzTOC,
checkVerifyTOC, checkVerifyTOC,
@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
}, },
}, },
{ {
name: "with-non-regfiles", name: "with-non-regfiles",
minChunkSize: []int{0, 64000},
tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf( return tarOf(
regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap), regDigest(t, "foo.txt", "a", dgstMap),
regDigest(t, "bar/foo2.txt", "b", dgstMap),
regDigest(t, "foo3.txt", "c", dgstMap),
symlink("barlink", "test/bar.txt"), symlink("barlink", "test/bar.txt"),
dir("test/"), dir("test/"),
regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
checkVerifyInvalidStargzFail(buildTar(t, tarOf( checkVerifyInvalidStargzFail(buildTar(t, tarOf(
file("baz.txt", "bazbazbazbazbazbazbaz"), file("baz.txt", "bazbazbazbazbazbazbaz"),
file("foo.txt", "a"), file("foo.txt", "a"),
file("bar/foo2.txt", "b"),
file("foo3.txt", "c"),
symlink("barlink", "test/bar.txt"), symlink("barlink", "test/bar.txt"),
dir("test/"), dir("test/"),
file("test/bar.txt", "testbartestbar"), file("test/bar.txt", "testbartestbar"),
@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
} }
for _, tt := range tests { for _, tt := range tests {
if len(tt.minChunkSize) == 0 {
tt.minChunkSize = []int{0}
}
for _, srcCompression := range srcCompressions { for _, srcCompression := range srcCompressions {
srcCompression := srcCompression srcCompression := srcCompression
for _, cl := range controllers { for _, newCL := range controllers {
cl := cl newCL := newCL
for _, prefix := range allowedPrefix { for _, prefix := range allowedPrefix {
prefix := prefix prefix := prefix
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat srcTarFormat := srcTarFormat
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { for _, minChunkSize := range tt.minChunkSize {
// Get original tar file and chunk digests minChunkSize := minChunkSize
dgstMap := make(map[string]digest.Digest) t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) // Get original tar file and chunk digests
dgstMap := make(map[string]digest.Digest)
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
rc, err := Build(compressBlob(t, tarBlob, srcCompression), cl := newCL()
WithChunkSize(chunkSize), WithCompression(cl)) rc, err := Build(compressBlob(t, tarBlob, srcCompression),
if err != nil { WithChunkSize(chunkSize), WithCompression(cl))
t.Fatalf("failed to convert stargz: %v", err) if err != nil {
} t.Fatalf("failed to convert stargz: %v", err)
tocDigest := rc.TOCDigest() }
defer rc.Close() tocDigest := rc.TOCDigest()
buf := new(bytes.Buffer) defer rc.Close()
if _, err := io.Copy(buf, rc); err != nil { buf := new(bytes.Buffer)
t.Fatalf("failed to copy built stargz blob: %v", err) if _, err := io.Copy(buf, rc); err != nil {
} t.Fatalf("failed to copy built stargz blob: %v", err)
newStargz := buf.Bytes() }
// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. newStargz := buf.Bytes()
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
for _, check := range tt.checks { for _, check := range tt.checks {
check(t, newStargz, tocDigest, dgstMap, cl) check(t, newStargz, tocDigest, dgstMap, cl, newCL)
} }
}) })
}
} }
} }
} }
@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
// checkStargzTOC checks the TOC JSON of the passed stargz has the expected // checkStargzTOC checks the TOC JSON of the passed stargz has the expected
// digest and contains valid chunks. It walks all entries in the stargz and // digest and contains valid chunks. It walks all entries in the stargz and
// checks all chunk digests stored to the TOC JSON match the actual contents. // checks all chunk digests stored to the TOC JSON match the actual contents.
func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open( sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller), WithDecompressors(controller),
@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
// checkVerifyTOC checks the verification works for the TOC JSON of the passed // checkVerifyTOC checks the verification works for the TOC JSON of the passed
// stargz. It walks all entries in the stargz and checks the verifications for // stargz. It walks all entries in the stargz and checks the verifications for
// all chunks work. // all chunks work.
func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open( sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller), WithDecompressors(controller),
@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
// detected during the verification and the verification returns an error. // detected during the verification and the verification returns an error.
func checkVerifyInvalidTOCEntryFail(filename string) check { func checkVerifyInvalidTOCEntryFail(filename string) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
funcs := map[string]rewriteFunc{ funcs := map[string]rewriteFunc{
"lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
var found bool var found bool
@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
// checkVerifyInvalidStargzFail checks if the verification detects that the // checkVerifyInvalidStargzFail checks if the verification detects that the
// given stargz file doesn't match to the expected digest and returns error. // given stargz file doesn't match to the expected digest and returns error.
func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller)) cl := newController()
rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
if err != nil { if err != nil {
t.Fatalf("failed to convert stargz: %v", err) t.Fatalf("failed to convert stargz: %v", err)
} }
@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
sgz, err := Open( sgz, err := Open(
io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
WithDecompressors(controller), WithDecompressors(cl),
) )
if err != nil { if err != nil {
t.Fatalf("failed to parse converted stargz: %v", err) t.Fatalf("failed to parse converted stargz: %v", err)
@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
// checkVerifyBrokenContentFail checks if the verifier detects broken contents // checkVerifyBrokenContentFail checks if the verifier detects broken contents
// that doesn't match to the expected digest and returns error. // that doesn't match to the expected digest and returns error.
func checkVerifyBrokenContentFail(filename string) check { func checkVerifyBrokenContentFail(filename string) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
// Parse stargz file // Parse stargz file
sgz, err := Open( sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
} }
// Decode the TOC JSON // Decode the TOC JSON
tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) var tocReader io.Reader
if tocOffset >= 0 {
tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
}
decodedJTOC, _, err = controller.ParseTOC(tocReader) decodedJTOC, _, err = controller.ParseTOC(tocReader)
if err != nil { if err != nil {
return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
return decodedJTOC, tocOffset, nil return decodedJTOC, tocOffset, nil
} }
func testWriteAndOpen(t *testing.T, controllers ...TestingController) { func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
const content = "Some contents" const content = "Some contents"
invalidUtf8 := "\xff\xfe\xfd" invalidUtf8 := "\xff\xfe\xfd"
xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
sampleOwner := owner{uid: 50, gid: 100} sampleOwner := owner{uid: 50, gid: 100}
data64KB := randomContents(64000)
tests := []struct { tests := []struct {
name string name string
chunkSize int chunkSize int
in []tarEntry minChunkSize int
want []stargzCheck in []tarEntry
wantNumGz int // expected number of streams want []stargzCheck
wantNumGz int // expected number of streams
wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
wantFailOnLossLess bool wantFailOnLossLess bool
wantTOCVersion int // default = 1
}{ }{
{ {
name: "empty", name: "empty",
in: tarOf(), in: tarOf(),
wantNumGz: 2, // empty tar + TOC + footer wantNumGz: 2, // (empty tar) + TOC + footer
wantNumGzLossLess: 3, // empty tar + TOC + footer
want: checks( want: checks(
numTOCEntries(0), numTOCEntries(0),
), ),
@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
dir("foo/"), dir("foo/"),
file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
), ),
wantNumGz: 9, wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
want: checks( want: checks(
numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
hasDir("foo/"), hasDir("foo/"),
@ -1326,23 +1371,108 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
mustSameEntry("foo/foo1", "foolink"), mustSameEntry("foo/foo1", "foolink"),
), ),
}, },
{
name: "several_files_in_chunk",
minChunkSize: 8000,
in: tarOf(
dir("foo/"),
file("foo/foo1", data64KB),
file("foo2", "bb"),
file("foo22", "ccc"),
dir("bar/"),
file("bar/bar.txt", "aaa"),
file("foo3", data64KB),
),
// NOTE: we assume that the compressed "data64KB" is still larger than 8KB
wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
want: checks(
numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
hasDir("foo/"),
hasDir("bar/"),
hasFileLen("foo/foo1", len(data64KB)),
hasFileLen("foo2", len("bb")),
hasFileLen("foo22", len("ccc")),
hasFileLen("bar/bar.txt", len("aaa")),
hasFileLen("foo3", len(data64KB)),
hasFileDigest("foo/foo1", digestFor(data64KB)),
hasFileDigest("foo2", digestFor("bb")),
hasFileDigest("foo22", digestFor("ccc")),
hasFileDigest("bar/bar.txt", digestFor("aaa")),
hasFileDigest("foo3", digestFor(data64KB)),
hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
hasFileContentsRange("foo/foo1", 0, data64KB),
hasFileContentsRange("foo2", 0, "bb"),
hasFileContentsRange("foo2", 1, "b"),
hasFileContentsRange("foo22", 0, "ccc"),
hasFileContentsRange("foo22", 1, "cc"),
hasFileContentsRange("foo22", 2, "c"),
hasFileContentsRange("bar/bar.txt", 0, "aaa"),
hasFileContentsRange("bar/bar.txt", 1, "aa"),
hasFileContentsRange("bar/bar.txt", 2, "a"),
hasFileContentsRange("foo3", 0, data64KB),
hasFileContentsRange("foo3", 1, data64KB[1:]),
hasFileContentsRange("foo3", 2, data64KB[2:]),
hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
),
},
{
name: "several_files_in_chunk_chunked",
minChunkSize: 8000,
chunkSize: 32000,
in: tarOf(
dir("foo/"),
file("foo/foo1", data64KB),
file("foo2", "bb"),
dir("bar/"),
file("foo3", data64KB),
),
// NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
want: checks(
numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
hasDir("foo/"),
hasDir("bar/"),
hasFileLen("foo/foo1", len(data64KB)),
hasFileLen("foo2", len("bb")),
hasFileLen("foo3", len(data64KB)),
hasFileDigest("foo/foo1", digestFor(data64KB)),
hasFileDigest("foo2", digestFor("bb")),
hasFileDigest("foo3", digestFor(data64KB)),
hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
hasFileContentsRange("foo/foo1", 0, data64KB),
hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
hasFileContentsRange("foo2", 0, "bb"),
hasFileContentsRange("foo2", 1, "b"),
hasFileContentsRange("foo3", 0, data64KB),
hasFileContentsRange("foo3", 1, data64KB[1:]),
hasFileContentsRange("foo3", 2, data64KB[2:]),
hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
),
},
} }
for _, tt := range tests { for _, tt := range tests {
for _, cl := range controllers { for _, newCL := range controllers {
cl := cl newCL := newCL
for _, prefix := range allowedPrefix { for _, prefix := range allowedPrefix {
prefix := prefix prefix := prefix
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat srcTarFormat := srcTarFormat
for _, lossless := range []bool{true, false} { for _, lossless := range []bool{true, false} {
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) { t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
origTarDgstr := digest.Canonical.Digester() origTarDgstr := digest.Canonical.Digester()
tr = io.TeeReader(tr, origTarDgstr.Hash()) tr = io.TeeReader(tr, origTarDgstr.Hash())
var stargzBuf bytes.Buffer var stargzBuf bytes.Buffer
w := NewWriterWithCompressor(&stargzBuf, cl) cl1 := newCL()
w := NewWriterWithCompressor(&stargzBuf, cl1)
w.ChunkSize = tt.chunkSize w.ChunkSize = tt.chunkSize
w.MinChunkSize = tt.minChunkSize
if lossless { if lossless {
err := w.AppendTarLossLess(tr) err := w.AppendTarLossLess(tr)
if tt.wantFailOnLossLess { if tt.wantFailOnLossLess {
@ -1366,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
if lossless { if lossless {
// Check if the result blob preserves the original tar metadata // Check if the result blob preserves the original tar metadata
rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl) rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
if err != nil { if err != nil {
t.Errorf("failed to decompress blob: %v", err) t.Errorf("failed to decompress blob: %v", err)
return return
@ -1385,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
} }
diffID := w.DiffID() diffID := w.DiffID()
wantDiffID := cl.DiffIDOf(t, b) wantDiffID := cl1.DiffIDOf(t, b)
if diffID != wantDiffID { if diffID != wantDiffID {
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
} }
got := cl.CountStreams(t, b)
wantNumGz := tt.wantNumGz
if lossless && tt.wantNumGzLossLess > 0 {
wantNumGz = tt.wantNumGzLossLess
}
if got != wantNumGz {
t.Errorf("number of streams = %d; want %d", got, wantNumGz)
}
telemetry, checkCalled := newCalledTelemetry() telemetry, checkCalled := newCalledTelemetry()
sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
r, err := Open( r, err := Open(
io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), sr,
WithDecompressors(cl), WithDecompressors(cl1),
WithTelemetry(telemetry), WithTelemetry(telemetry),
) )
if err != nil { if err != nil {
t.Fatalf("stargz.Open: %v", err) t.Fatalf("stargz.Open: %v", err)
} }
if err := checkCalled(); err != nil { wantTOCVersion := 1
if tt.wantTOCVersion > 0 {
wantTOCVersion = tt.wantTOCVersion
}
if r.toc.Version != wantTOCVersion {
t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
}
footerSize := cl1.FooterSize()
footerOffset := sr.Size() - footerSize
footer := make([]byte, footerSize)
if _, err := sr.ReadAt(footer, footerOffset); err != nil {
t.Errorf("failed to read footer: %v", err)
}
_, tocOffset, _, err := cl1.ParseFooter(footer)
if err != nil {
t.Errorf("failed to parse footer: %v", err)
}
if err := checkCalled(tocOffset >= 0); err != nil {
t.Errorf("telemetry failure: %v", err) t.Errorf("telemetry failure: %v", err)
} }
wantNumGz := tt.wantNumGz
if lossless && tt.wantNumGzLossLess > 0 {
wantNumGz = tt.wantNumGzLossLess
}
streamOffsets := []int64{0}
prevOffset := int64(-1)
streams := 0
for _, e := range r.toc.Entries {
if e.Offset > prevOffset {
streamOffsets = append(streamOffsets, e.Offset)
prevOffset = e.Offset
streams++
}
}
streams++ // TOC
if tocOffset >= 0 {
// toc is in the blob
streamOffsets = append(streamOffsets, tocOffset)
}
streams++ // footer
streamOffsets = append(streamOffsets, footerOffset)
if streams != wantNumGz {
t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
}
t.Logf("testing streams: %+v", streamOffsets)
cl1.TestStreams(t, b, streamOffsets)
for _, want := range tt.want { for _, want := range tt.want {
want.check(t, r) want.check(t, r)
} }
@ -1422,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
} }
} }
func newCalledTelemetry() (telemetry *Telemetry, check func() error) { type chunkInfo struct {
name string
data string
}
func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
var getFooterLatencyCalled bool var getFooterLatencyCalled bool
var getTocLatencyCalled bool var getTocLatencyCalled bool
var deserializeTocLatencyCalled bool var deserializeTocLatencyCalled bool
@ -1430,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
func(time.Time) { getFooterLatencyCalled = true }, func(time.Time) { getFooterLatencyCalled = true },
func(time.Time) { getTocLatencyCalled = true }, func(time.Time) { getTocLatencyCalled = true },
func(time.Time) { deserializeTocLatencyCalled = true }, func(time.Time) { deserializeTocLatencyCalled = true },
}, func() error { }, func(needsGetTOC bool) error {
var allErr []error var allErr []error
if !getFooterLatencyCalled { if !getFooterLatencyCalled {
allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
} }
if !getTocLatencyCalled { if needsGetTOC {
allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) if !getTocLatencyCalled {
allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
}
} }
if !deserializeTocLatencyCalled { if !deserializeTocLatencyCalled {
allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
@ -1573,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck {
}) })
} }
func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
extraMap := make(map[string]chunkInfo)
for _, e := range extra {
extraMap[e.name] = e
}
var extraNames []string
for n := range extraMap {
extraNames = append(extraNames, n)
}
f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
t.Logf("On %q: got preread of %q", file, e.Name)
ex, ok := extraMap[e.Name]
if !ok {
t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
}
got, err := io.ReadAll(cr)
if err != nil {
t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
}
if ex.data != string(got) {
t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
}
delete(extraMap, e.Name)
return nil
})
if err != nil {
t.Fatal(err)
}
got := make([]byte, len(want))
n, err := f.ReadAt(got, int64(offset))
if err != nil {
t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
}
if string(got) != want {
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
}
if len(extraMap) != 0 {
var exNames []string
for _, ex := range extraMap {
exNames = append(exNames, ex.name)
}
t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
}
})
}
func hasFileContentsRange(file string, offset int, want string) stargzCheck { func hasFileContentsRange(file string, offset int, want string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) { return stargzCheckFn(func(t *testing.T, r *Reader) {
f, err := r.OpenFile(file) f, err := r.OpenFile(file)
@ -1585,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck {
t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
} }
if string(got) != want { if string(got) != want {
t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
} }
}) })
} }
@ -1797,6 +2020,13 @@ func mustSameEntry(files ...string) stargzCheck {
}) })
} }
func viewContent(c []byte) string {
if len(c) < 100 {
return string(c)
}
return string(c[:50]) + "...(omit)..." + string(c[50:100])
}
func tarOf(s ...tarEntry) []tarEntry { return s } func tarOf(s ...tarEntry) []tarEntry { return s }
type tarEntry interface { type tarEntry interface {
@ -2056,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
}) })
} }
var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randomContents(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = runes[rand.Intn(len(runes))]
}
return string(b)
}
func fileModeToTarMode(mode os.FileMode) (int64, error) { func fileModeToTarMode(mode os.FileMode) (int64, error) {
h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
if err != nil { if err != nil {
@ -2073,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
func (f fileInfoOnlyMode) Sys() interface{} { return nil } func (f fileInfoOnlyMode) Sys() interface{} { return nil }
func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
if len(streams) == 0 {
return // nop
}
wants := map[int64]struct{}{}
for _, s := range streams {
wants[s] = struct{}{}
}
len0 := len(b)
br := bytes.NewReader(b)
zr := new(gzip.Reader)
t.Logf("got gzip streams:")
numStreams := 0
for {
zoff := len0 - br.Len()
if err := zr.Reset(br); err != nil {
if err == io.EOF {
return
}
t.Fatalf("countStreams(gzip), Reset: %v", err)
}
zr.Multistream(false)
n, err := io.Copy(io.Discard, zr)
if err != nil {
t.Fatalf("countStreams(gzip), Copy: %v", err)
}
var extra string
if len(zr.Header.Extra) > 0 {
extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
}
t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
delete(wants, int64(zoff))
numStreams++
}
}
func GzipDiffIDOf(t *testing.T, b []byte) string {
h := sha256.New()
zr, err := gzip.NewReader(bytes.NewReader(b))
if err != nil {
t.Fatalf("diffIDOf(gzip): %v", err)
}
defer zr.Close()
if _, err := io.Copy(h, zr); err != nil {
t.Fatalf("diffIDOf(gzip).Copy: %v", err)
}
return fmt.Sprintf("sha256:%x", h.Sum(nil))
}

View File

@ -149,6 +149,12 @@ type TOCEntry struct {
// ChunkSize. // ChunkSize.
Offset int64 `json:"offset,omitempty"` Offset int64 `json:"offset,omitempty"`
// InnerOffset is an optional field that indicates the uncompressed offset
// of this "reg" or "chunk" payload within the stream that starts at Offset.
// This field makes it possible to put multiple "reg" or "chunk" payloads
// in one chunk, sharing the same Offset but with different InnerOffset values
// (see the sketch after this struct).
InnerOffset int64 `json:"innerOffset,omitempty"`
nextOffset int64 // the Offset of the next entry with a non-zero Offset nextOffset int64 // the Offset of the next entry with a non-zero Offset
// DevMajor is the major device number for "char" and "block" types. // DevMajor is the major device number for "char" and "block" types.
@ -186,6 +192,9 @@ type TOCEntry struct {
ChunkDigest string `json:"chunkDigest,omitempty"` ChunkDigest string `json:"chunkDigest,omitempty"`
children map[string]*TOCEntry children map[string]*TOCEntry
// chunkTopIndex is index of the entry where Offset starts in the blob.
chunkTopIndex int
} }
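The InnerOffset semantics above can be illustrated with a hypothetical TOC fragment. This is a sketch only: the concrete values are made up, and the Name/Type/Size fields are assumed to be the estargz TOCEntry's usual fields rather than quoted from this diff.

// Hypothetical sketch: "foo2" (2 bytes) and "foo22" (3 bytes) are packed into
// one compressed stream starting at blob offset 51200, so both entries share
// Offset and are positioned inside the uncompressed stream by InnerOffset.
func exampleSharedOffsetEntries() []*TOCEntry {
	return []*TOCEntry{
		{Name: "foo2", Type: "reg", Size: 2, Offset: 51200, InnerOffset: 0},
		{Name: "foo22", Type: "reg", Size: 3, Offset: 51200, InnerOffset: 2},
	}
}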
// ModTime returns the entry's modification time. // ModTime returns the entry's modification time.
@ -279,7 +288,10 @@ type Compressor interface {
// Writer returns WriteCloser to be used for writing a chunk to eStargz. // Writer returns WriteCloser to be used for writing a chunk to eStargz.
// Everytime a chunk is written, the WriteCloser is closed and Writer is // Everytime a chunk is written, the WriteCloser is closed and Writer is
// called again for writing the next chunk. // called again for writing the next chunk.
Writer(w io.Writer) (io.WriteCloser, error) //
// The returned writer should implement a "Flush() error" method that flushes
// any pending compressed data to the underlying writer.
Writer(w io.Writer) (WriteFlushCloser, error)
// WriteTOCAndFooter is called to write JTOC to the passed Writer. // WriteTOCAndFooter is called to write JTOC to the passed Writer.
// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob
@ -303,8 +315,12 @@ type Decompressor interface {
// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between // payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
// the top until the TOC JSON). // the top until the TOC JSON).
// //
// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range // If tocOffset < 0, we assume that TOC isn't contained in the blob and pass nil reader
// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize). // to ParseTOC. We expect that ParseTOC acquires the TOC from an external location and returns it.
//
// tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the
// footer (blob size - tocOff - FooterSize).
// If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
// ParseTOC parses TOC from the passed reader. The reader provides the partial contents // ParseTOC parses TOC from the passed reader. The reader provides the partial contents
@ -313,5 +329,14 @@ type Decompressor interface {
// This function returns tocDgst that represents the digest of TOC that will be used // This function returns tocDgst that represents the digest of TOC that will be used
// to verify this blob. This must match to the value returned from // to verify this blob. This must match to the value returned from
// Compressor.WriteTOCAndFooter that is used when creating this blob. // Compressor.WriteTOCAndFooter that is used when creating this blob.
//
// If the tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in the blob.
// In that case a nil reader is passed to ParseTOC, and ParseTOC is expected to acquire the TOC from
// an external location and return it (see the sketch after this interface).
ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
} }
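As a hedged illustration of the external-TOC contract documented above, the following partial sketch shows only ParseFooter and ParseTOC; it is not a complete Decompressor implementation, the fetchTOC field is a made-up stand-in for however an implementation would reach the external location, and standard-library imports (errors) are assumed.

// externalTOCDecompressor is a hypothetical partial decompressor whose footer
// carries no embedded TOC: ParseFooter returns tocOffset = -1, so ParseTOC is
// later called with a nil reader and must supply the TOC from elsewhere.
type externalTOCDecompressor struct {
	fetchTOC func() (*JTOC, digest.Digest, error) // stand-in for an external lookup
}

func (d *externalTOCDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
	// blobPayloadSize < 0 means "use the blob size"; tocOffset < 0 signals that
	// the TOC must be acquired externally by ParseTOC.
	return -1, -1, 0, nil
}

func (d *externalTOCDecompressor) ParseTOC(r io.Reader) (*JTOC, digest.Digest, error) {
	if r != nil {
		// With tocOffset >= 0 the caller would pass a reader over the in-blob
		// TOC; this sketch only handles the external case.
		return nil, "", errors.New("unexpected in-blob TOC reader")
	}
	return d.fetchTOC()
}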
type WriteFlushCloser interface {
io.WriteCloser
Flush() error
}
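For reference, here is a minimal standalone sketch (not part of this diff) showing that the standard library's gzip.Writer already provides the Write/Flush/Close combination required by the new WriteFlushCloser contract; the local interface declaration simply mirrors the one above.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// WriteFlushCloser mirrors the interface added in estargz.
type WriteFlushCloser interface {
	io.WriteCloser
	Flush() error
}

// Compile-time check: *gzip.Writer satisfies WriteFlushCloser.
var _ WriteFlushCloser = (*gzip.Writer)(nil)

func main() {
	var buf bytes.Buffer
	var w WriteFlushCloser = gzip.NewWriter(&buf)
	if _, err := w.Write([]byte("chunk payload")); err != nil {
		panic(err)
	}
	// Flush pushes any pending compressed data to buf without ending the stream.
	if err := w.Flush(); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d bytes\n", buf.Len())
}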

View File

@ -72,6 +72,8 @@ fedora_testing_task: &fedora_testing
TEST_DRIVER: "vfs" TEST_DRIVER: "vfs"
- env: - env:
TEST_DRIVER: "overlay" TEST_DRIVER: "overlay"
- env:
TEST_DRIVER: "overlay-transient"
- env: - env:
TEST_DRIVER: "fuse-overlay" TEST_DRIVER: "fuse-overlay"
- env: - env:
@ -114,7 +116,7 @@ lint_task:
echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list
apt-get update apt-get update
apt-get install -y libbtrfs-dev libdevmapper-dev apt-get install -y libbtrfs-dev libdevmapper-dev
test_script: make local-validate && make lint test_script: make TAGS=regex_precompile local-validate && make lint && make clean
# Update metadata on VM images referenced by this repository state # Update metadata on VM images referenced by this repository state

View File

@ -60,7 +60,7 @@ local-gccgo: ## build using gccgo on the host
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
local-cross: ## cross build the binaries for arm, darwin, and freebsd local-cross: ## cross build the binaries for arm, darwin, and freebsd
@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
os=`echo $${target} | cut -f1 -d/` ; \ os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \ suffix=$${os}.$${arch} ; \
@ -117,7 +117,7 @@ help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
vendor-in-container: vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang:1.17 make vendor
vendor: vendor:
$(GO) mod tidy -compat=1.17 $(GO) mod tidy -compat=1.17

View File

@ -1 +1 @@
1.44.0 1.45.0

View File

@ -10,11 +10,28 @@ import (
"github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex" "github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
) )
type containerLocations uint8
// The backing store is split into two json files, one of which (the volatile
// one) is written without fsync(), meaning it isn't as robust to an
// unclean shutdown.
const (
stableContainerLocation containerLocations = 1 << iota
volatileContainerLocation
numContainerLocationIndex = iota
)
func containerLocationFromIndex(index int) containerLocations {
return 1 << index
}
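A standalone sketch (not part of this diff) of the location-bitmask pattern above, re-declaring the names locally for illustration; index 0 corresponds to the fsync'ed containers.json and index 1 to the volatile-containers.json written with NoSync.

package main

import "fmt"

type containerLocations uint8

const (
	stableContainerLocation   containerLocations = 1 << iota // bit 0: containers.json (fsync'ed)
	volatileContainerLocation                                // bit 1: volatile-containers.json (NoSync)
	numContainerLocationIndex = iota                         // 2
)

func containerLocationFromIndex(index int) containerLocations {
	return 1 << index
}

func main() {
	// Walk every location index the way load() and save() do and show which
	// bit each index maps to.
	for i := 0; i < numContainerLocationIndex; i++ {
		fmt.Printf("index %d -> location bit %b\n", i, containerLocationFromIndex(i))
	}
	// Prints:
	// index 0 -> location bit 1
	// index 1 -> location bit 10
}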
// A Container is a reference to a read-write layer with metadata. // A Container is a reference to a read-write layer with metadata.
type Container struct { type Container struct {
// ID is either one which was specified at create-time, or a random // ID is either one which was specified at create-time, or a random
@ -64,6 +81,9 @@ type Container struct {
GIDMap []idtools.IDMap `json:"gidmap,omitempty"` GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
Flags map[string]interface{} `json:"flags,omitempty"` Flags map[string]interface{} `json:"flags,omitempty"`
// volatileStore is true if the container is from the volatile json file
volatileStore bool `json:"-"`
} }
// rwContainerStore provides bookkeeping for information about Containers. // rwContainerStore provides bookkeeping for information about Containers.
@ -115,17 +135,27 @@ type rwContainerStore interface {
// Containers returns a slice enumerating the known containers. // Containers returns a slice enumerating the known containers.
Containers() ([]Container, error) Containers() ([]Container, error)
// Clean up unreferenced datadirs
GarbageCollect() error
} }
type containerStore struct { type containerStore struct {
lockfile Locker // The following fields are only set when constructing containerStore, and must never be modified afterwards.
dir string // They are safe to access without any other locking.
lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process.
dir string
jsonPath [numContainerLocationIndex]string
inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
// The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
// Almost all users should use startReading() or startWriting().
lastWrite lockfile.LastWrite
containers []*Container containers []*Container
idindex *truncindex.TruncIndex idindex *truncindex.TruncIndex
byid map[string]*Container byid map[string]*Container
bylayer map[string]*Container bylayer map[string]*Container
byname map[string]*Container byname map[string]*Container
loadMut sync.Mutex
} }
func copyContainer(c *Container) *Container { func copyContainer(c *Container) *Container {
@ -142,6 +172,7 @@ func copyContainer(c *Container) *Container {
UIDMap: copyIDMap(c.UIDMap), UIDMap: copyIDMap(c.UIDMap),
GIDMap: copyIDMap(c.GIDMap), GIDMap: copyIDMap(c.GIDMap),
Flags: copyStringInterfaceMap(c.Flags), Flags: copyStringInterfaceMap(c.Flags),
volatileStore: c.volatileStore,
} }
} }
@ -176,6 +207,14 @@ func (c *Container) MountOpts() []string {
} }
} }
// The caller must hold r.inProcessLock for reading.
func containerLocation(c *Container) containerLocations {
if c.volatileStore {
return volatileContainerLocation
}
return stableContainerLocation
}
// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing. // startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting(). // If this succeeds, the caller MUST call stopWriting().
// //
@ -183,15 +222,17 @@ func (c *Container) MountOpts() []string {
// should use startWriting() instead. // should use startWriting() instead.
func (r *containerStore) startWritingWithReload(canReload bool) error { func (r *containerStore) startWritingWithReload(canReload bool) error {
r.lockfile.Lock() r.lockfile.Lock()
r.inProcessLock.Lock()
succeeded := false succeeded := false
defer func() { defer func() {
if !succeeded { if !succeeded {
r.inProcessLock.Unlock()
r.lockfile.Unlock() r.lockfile.Unlock()
} }
}() }()
if canReload { if canReload {
if err := r.reloadIfChanged(true); err != nil { if _, err := r.reloadIfChanged(true); err != nil {
return err return err
} }
} }
@ -208,48 +249,145 @@ func (r *containerStore) startWriting() error {
// stopWriting releases locks obtained by startWriting. // stopWriting releases locks obtained by startWriting.
func (r *containerStore) stopWriting() { func (r *containerStore) stopWriting() {
r.inProcessLock.Unlock()
r.lockfile.Unlock() r.lockfile.Unlock()
} }
// startReading makes sure the store is fresh, and locks it for reading. // startReading makes sure the store is fresh, and locks it for reading.
// If this succeeds, the caller MUST call stopReading(). // If this succeeds, the caller MUST call stopReading().
func (r *containerStore) startReading() error { func (r *containerStore) startReading() error {
r.lockfile.RLock() // inProcessLocked calls the nested function with r.inProcessLock held for writing.
succeeded := false inProcessLocked := func(fn func() error) error {
defer func() { r.inProcessLock.Lock()
if !succeeded { defer r.inProcessLock.Unlock()
r.lockfile.Unlock() return fn()
}
}()
if err := r.reloadIfChanged(false); err != nil {
return err
} }
succeeded = true r.lockfile.RLock()
unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil.
defer func() {
if unlockFn != nil {
unlockFn()
}
}()
r.inProcessLock.RLock()
unlockFn = r.stopReading
// If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
_, modified, err := r.modified()
if err != nil {
return err
}
if modified {
// We are unlucky, and need to reload.
// NOTE: Multiple goroutines can get to this place approximately simultaneously.
r.inProcessLock.RUnlock()
unlockFn = r.lockfile.Unlock
// r.lastWrite can change at this point if another goroutine reloads the store before us. That's why we don't unconditionally
// trigger a load below; we (lock and) reloadIfChanged() again.
// First try reloading with r.lockfile held for reading.
// r.inProcessLock will serialize all goroutines that got here;
// each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
var tryLockedForWriting bool
if err := inProcessLocked(func() error {
// We could optimize this further: The r.lockfile.GetLastWrite() value shouldn't change as long as we hold r.lockfile,
// so if r.lastWrite was already updated, we don't need to actually read the on-filesystem lock.
var err error
tryLockedForWriting, err = r.reloadIfChanged(false)
return err
}); err != nil {
if !tryLockedForWriting {
return err
}
// Not good enough, we need r.lockfile held for writing. So, let's do that.
unlockFn()
unlockFn = nil
r.lockfile.Lock()
unlockFn = r.lockfile.Unlock
if err := inProcessLocked(func() error {
_, err := r.reloadIfChanged(true)
return err
}); err != nil {
return err
}
unlockFn()
unlockFn = nil
r.lockfile.RLock()
unlockFn = r.lockfile.Unlock
// We need to check for a reload once more because the on-disk state could have been modified
// after we released the lock.
// If that, _again_, finds inconsistent state, just give up.
// We could, plausibly, retry a few times, but that inconsistent state (duplicate container names)
// shouldn't be saved (by correct implementations) in the first place.
if err := inProcessLocked(func() error {
_, err := r.reloadIfChanged(false)
return err
}); err != nil {
return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
}
}
// NOTE that we hold neither a read nor write inProcessLock at this point. That's fine in ordinary operation, because
// the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
// protects us against in-process writers modifying data.
// In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
// and 2) access to the in-memory data is not racy;
// but we can't protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
r.inProcessLock.RLock()
}
unlockFn = nil
return nil return nil
} }
// stopReading releases locks obtained by startReading. // stopReading releases locks obtained by startReading.
func (r *containerStore) stopReading() { func (r *containerStore) stopReading() {
r.inProcessLock.RUnlock()
r.lockfile.Unlock() r.lockfile.Unlock()
} }
// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store),
// and a lockfile.LastWrite value for that update.
//
// The caller must hold r.lockfile for reading _or_ writing.
// The caller must hold r.inProcessLock for reading or writing.
func (r *containerStore) modified() (lockfile.LastWrite, bool, error) {
return r.lockfile.ModifiedSince(r.lastWrite)
}
// reloadIfChanged reloads the contents of the store from disk if it is changed. // reloadIfChanged reloads the contents of the store from disk if it is changed.
// //
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing. // if it is held for writing.
func (r *containerStore) reloadIfChanged(lockedForWriting bool) error { //
r.loadMut.Lock() // The caller must hold r.inProcessLock for WRITING.
defer r.loadMut.Unlock() //
// If !lockedForWriting and this function fails, the return value indicates whether
modified, err := r.lockfile.Modified() // reloadIfChanged() with lockedForWriting could succeed.
if err == nil && modified { func (r *containerStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
return r.load(lockedForWriting) lastWrite, modified, err := r.modified()
if err != nil {
return false, err
} }
return err // We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
// and modify no fields, to ensure they see fresh data:
// r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
// one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
// see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
if modified {
if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
}
r.lastWrite = lastWrite
}
return false, nil
} }
// Requires startReading or startWriting.
func (r *containerStore) Containers() ([]Container, error) { func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers)) containers := make([]Container, len(r.containers))
for i := range r.containers { for i := range r.containers {
@ -258,8 +396,38 @@ func (r *containerStore) Containers() ([]Container, error) {
return containers, nil return containers, nil
} }
func (r *containerStore) containerspath() string { // This looks for datadirs in the store directory that are not referenced
return filepath.Join(r.dir, "containers.json") // by the json file and removes it. These can happen in the case of unclean
// shutdowns or regular restarts in transient store mode.
// Requires startReading.
func (r *containerStore) GarbageCollect() error {
entries, err := os.ReadDir(r.dir)
if err != nil {
// Unexpected, don't try any GC
return err
}
for _, entry := range entries {
id := entry.Name()
// Does it look like a datadir directory?
if !entry.IsDir() || !nameLooksLikeID(id) {
continue
}
// Should the id be there?
if r.byid[id] != nil {
continue
}
// Otherwise remove datadir
moreErr := os.RemoveAll(filepath.Join(r.dir, id))
// Propagate first error
if moreErr != nil && err == nil {
err = moreErr
}
}
return err
} }
func (r *containerStore) datadir(id string) string { func (r *containerStore) datadir(id string) string {
@ -272,34 +440,63 @@ func (r *containerStore) datapath(id, key string) string {
// load reloads the contents of the store from disk. // load reloads the contents of the store from disk.
// //
// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
// manage r.lastWrite.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true // The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing. // if it is held for writing.
func (r *containerStore) load(lockedForWriting bool) error { // The caller must hold r.inProcessLock for WRITING.
needSave := false //
rpath := r.containerspath() // If !lockedForWriting and this function fails, the return value indicates whether
data, err := os.ReadFile(rpath) // retrying with lockedForWriting could succeed.
if err != nil && !os.IsNotExist(err) { func (r *containerStore) load(lockedForWriting bool) (bool, error) {
return err var modifiedLocations containerLocations
}
containers := []*Container{} containers := []*Container{}
if len(data) != 0 {
if err := json.Unmarshal(data, &containers); err != nil { ids := make(map[string]*Container)
return fmt.Errorf("loading %q: %w", rpath, err)
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
location := containerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return false, err
}
locationContainers := []*Container{}
if len(data) != 0 {
if err := json.Unmarshal(data, &locationContainers); err != nil {
return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
for _, container := range locationContainers {
// There should be no duplicated ids between json files, but let's check to be sure
if ids[container.ID] != nil {
continue // skip invalid duplicated container
}
// Remember where the container came from
if location == volatileContainerLocation {
container.volatileStore = true
}
containers = append(containers, container)
ids[container.ID] = container
} }
} }
idlist := make([]string, 0, len(containers)) idlist := make([]string, 0, len(containers))
layers := make(map[string]*Container) layers := make(map[string]*Container)
ids := make(map[string]*Container)
names := make(map[string]*Container) names := make(map[string]*Container)
var errorToResolveBySaving error // == nil
for n, container := range containers { for n, container := range containers {
idlist = append(idlist, container.ID) idlist = append(idlist, container.ID)
ids[container.ID] = containers[n]
layers[container.LayerID] = containers[n] layers[container.LayerID] = containers[n]
for _, name := range container.Names { for _, name := range container.Names {
if conflict, ok := names[name]; ok { if conflict, ok := names[name]; ok {
r.removeName(conflict, name) r.removeName(conflict, name)
needSave = true errorToResolveBySaving = errors.New("container store is inconsistent and the current caller does not hold a write lock")
modifiedLocations |= containerLocation(container)
} }
names[name] = containers[n] names[name] = containers[n]
} }
@ -310,60 +507,109 @@ func (r *containerStore) load(lockedForWriting bool) error {
r.byid = ids r.byid = ids
r.bylayer = layers r.bylayer = layers
r.byname = names r.byname = names
if needSave { if errorToResolveBySaving != nil {
if !lockedForWriting { if !lockedForWriting {
// Eventually, the callers should be modified to retry with a write lock, instead. return true, errorToResolveBySaving
return errors.New("container store is inconsistent and the current caller does not hold a write lock")
} }
return r.Save() return false, r.save(modifiedLocations)
} }
return nil return false, nil
} }
// Save saves the contents of the store to disk. It should be called with // save saves the contents of the store to disk.
// the lock held, locked for writing. // The caller must hold r.lockfile locked for writing.
func (r *containerStore) Save() error { // The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *containerStore) save(saveLocations containerLocations) error {
r.lockfile.AssertLockedForWriting() r.lockfile.AssertLockedForWriting()
rpath := r.containerspath() for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { location := containerLocationFromIndex(locationIndex)
return err if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
}
subsetContainers := make([]*Container, 0, len(r.containers))
for _, container := range r.containers {
if containerLocation(container) == location {
subsetContainers = append(subsetContainers, container)
}
}
jdata, err := json.Marshal(&subsetContainers)
if err != nil {
return err
}
var opts *ioutils.AtomicFileWriterOptions
if location == volatileContainerLocation {
opts = &ioutils.AtomicFileWriterOptions{
NoSync: true,
}
}
if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil {
return err
}
} }
jdata, err := json.Marshal(&r.containers) lw, err := r.lockfile.RecordWrite()
if err != nil { if err != nil {
return err return err
} }
if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil { r.lastWrite = lw
return err return nil
}
return r.lockfile.Touch()
} }
func newContainerStore(dir string) (rwContainerStore, error) { // saveFor saves the contents of the store relevant for modifiedContainer to disk.
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *containerStore) saveFor(modifiedContainer *Container) error {
return r.save(containerLocation(modifiedContainer))
}
func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
if err := os.MkdirAll(dir, 0700); err != nil { if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err return nil, err
} }
lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) volatileDir := dir
if transient {
if err := os.MkdirAll(runDir, 0700); err != nil {
return nil, err
}
volatileDir = runDir
}
lockfile, err := lockfile.GetLockFile(filepath.Join(volatileDir, "containers.lock"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
cstore := containerStore{ cstore := containerStore{
lockfile: lockfile, lockfile: lockfile,
dir: dir, dir: dir,
jsonPath: [numContainerLocationIndex]string{
filepath.Join(dir, "containers.json"),
filepath.Join(volatileDir, "volatile-containers.json"),
},
containers: []*Container{}, containers: []*Container{},
byid: make(map[string]*Container), byid: make(map[string]*Container),
bylayer: make(map[string]*Container), bylayer: make(map[string]*Container),
byname: make(map[string]*Container), byname: make(map[string]*Container),
} }
if err := cstore.startWritingWithReload(false); err != nil { if err := cstore.startWritingWithReload(false); err != nil {
return nil, err return nil, err
} }
cstore.lastWrite, err = cstore.lockfile.GetLastWrite()
if err != nil {
return nil, err
}
defer cstore.stopWriting() defer cstore.stopWriting()
if err := cstore.load(true); err != nil { if _, err := cstore.load(true); err != nil {
return nil, err return nil, err
} }
return &cstore, nil return &cstore, nil
} }
// Requires startReading or startWriting.
func (r *containerStore) lookup(id string) (*Container, bool) { func (r *containerStore) lookup(id string) (*Container, bool) {
if container, ok := r.byid[id]; ok { if container, ok := r.byid[id]; ok {
return container, ok return container, ok
@ -379,15 +625,17 @@ func (r *containerStore) lookup(id string) (*Container, bool) {
return nil, false return nil, false
} }
// Requires startWriting.
func (r *containerStore) ClearFlag(id string, flag string) error { func (r *containerStore) ClearFlag(id string, flag string) error {
container, ok := r.lookup(id) container, ok := r.lookup(id)
if !ok { if !ok {
return ErrContainerUnknown return ErrContainerUnknown
} }
delete(container.Flags, flag) delete(container.Flags, flag)
return r.Save() return r.saveFor(container)
} }
// Requires startWriting.
func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { func (r *containerStore) SetFlag(id string, flag string, value interface{}) error {
container, ok := r.lookup(id) container, ok := r.lookup(id)
if !ok { if !ok {
@ -397,9 +645,10 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
container.Flags = make(map[string]interface{}) container.Flags = make(map[string]interface{})
} }
container.Flags[flag] = value container.Flags[flag] = value
return r.Save() return r.saveFor(container)
} }
// Requires startWriting.
func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) { func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) {
if id == "" { if id == "" {
id = stringid.GenerateRandomID() id = stringid.GenerateRandomID()
@ -443,6 +692,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
Flags: copyStringInterfaceMap(options.Flags), Flags: copyStringInterfaceMap(options.Flags),
UIDMap: copyIDMap(options.UIDMap), UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap), GIDMap: copyIDMap(options.GIDMap),
volatileStore: options.Volatile,
} }
r.containers = append(r.containers, container) r.containers = append(r.containers, container)
r.byid[id] = container r.byid[id] = container
@ -453,11 +703,12 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
for _, name := range names { for _, name := range names {
r.byname[name] = container r.byname[name] = container
} }
err = r.Save() err = r.saveFor(container)
container = copyContainer(container) container = copyContainer(container)
return container, err return container, err
} }
// Requires startReading or startWriting.
func (r *containerStore) Metadata(id string) (string, error) { func (r *containerStore) Metadata(id string) (string, error) {
if container, ok := r.lookup(id); ok { if container, ok := r.lookup(id); ok {
return container.Metadata, nil return container.Metadata, nil
@ -465,18 +716,21 @@ func (r *containerStore) Metadata(id string) (string, error) {
return "", ErrContainerUnknown return "", ErrContainerUnknown
} }
// Requires startWriting.
func (r *containerStore) SetMetadata(id, metadata string) error { func (r *containerStore) SetMetadata(id, metadata string) error {
if container, ok := r.lookup(id); ok { if container, ok := r.lookup(id); ok {
container.Metadata = metadata container.Metadata = metadata
return r.Save() return r.saveFor(container)
} }
return ErrContainerUnknown return ErrContainerUnknown
} }
// The caller must hold r.inProcessLock for writing.
func (r *containerStore) removeName(container *Container, name string) { func (r *containerStore) removeName(container *Container, name string) {
container.Names = stringSliceWithoutValue(container.Names, name) container.Names = stringSliceWithoutValue(container.Names, name)
} }
// Requires startWriting.
func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error {
container, ok := r.lookup(id) container, ok := r.lookup(id)
if !ok { if !ok {
@ -497,9 +751,10 @@ func (r *containerStore) updateNames(id string, names []string, op updateNameOpe
r.byname[name] = container r.byname[name] = container
} }
container.Names = names container.Names = names
return r.Save() return r.saveFor(container)
} }
// Requires startWriting.
func (r *containerStore) Delete(id string) error { func (r *containerStore) Delete(id string) error {
container, ok := r.lookup(id) container, ok := r.lookup(id)
if !ok { if !ok {
@ -529,7 +784,7 @@ func (r *containerStore) Delete(id string) error {
r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
} }
} }
if err := r.Save(); err != nil { if err := r.saveFor(container); err != nil {
return err return err
} }
if err := os.RemoveAll(r.datadir(id)); err != nil { if err := os.RemoveAll(r.datadir(id)); err != nil {
@ -538,6 +793,7 @@ func (r *containerStore) Delete(id string) error {
return nil return nil
} }
// Requires startReading or startWriting.
func (r *containerStore) Get(id string) (*Container, error) { func (r *containerStore) Get(id string) (*Container, error) {
if container, ok := r.lookup(id); ok { if container, ok := r.lookup(id); ok {
return copyContainer(container), nil return copyContainer(container), nil
@ -545,6 +801,7 @@ func (r *containerStore) Get(id string) (*Container, error) {
return nil, ErrContainerUnknown return nil, ErrContainerUnknown
} }
// Requires startReading or startWriting.
func (r *containerStore) Lookup(name string) (id string, err error) { func (r *containerStore) Lookup(name string) (id string, err error) {
if container, ok := r.lookup(name); ok { if container, ok := r.lookup(name); ok {
return container.ID, nil return container.ID, nil
@ -552,11 +809,13 @@ func (r *containerStore) Lookup(name string) (id string, err error) {
return "", ErrContainerUnknown return "", ErrContainerUnknown
} }
// Requires startReading or startWriting.
func (r *containerStore) Exists(id string) bool { func (r *containerStore) Exists(id string) bool {
_, ok := r.lookup(id) _, ok := r.lookup(id)
return ok return ok
} }
// Requires startReading or startWriting.
func (r *containerStore) BigData(id, key string) ([]byte, error) { func (r *containerStore) BigData(id, key string) ([]byte, error) {
if key == "" { if key == "" {
return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName) return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName)
@ -568,6 +827,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
return os.ReadFile(r.datapath(c.ID, key)) return os.ReadFile(r.datapath(c.ID, key))
} }
// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataSize(id, key string) (int64, error) { func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" { if key == "" {
return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName) return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName)
@ -576,10 +836,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if !ok { if !ok {
return -1, ErrContainerUnknown return -1, ErrContainerUnknown
} }
if c.BigDataSizes == nil { if size, ok := c.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
c.BigDataSizes = make(map[string]int64)
}
if size, ok := c.BigDataSizes[key]; ok {
return size, nil return size, nil
} }
if data, err := r.BigData(id, key); err == nil && data != nil { if data, err := r.BigData(id, key); err == nil && data != nil {
@ -598,6 +855,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
return -1, ErrSizeUnknown return -1, ErrSizeUnknown
} }
// Requires startWriting. Yes, really, WRITING (see SetBigData).
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" { if key == "" {
return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName) return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName)
@ -606,10 +864,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if !ok { if !ok {
return "", ErrContainerUnknown return "", ErrContainerUnknown
} }
if c.BigDataDigests == nil { if d, ok := c.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
c.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := c.BigDataDigests[key]; ok {
return d, nil return d, nil
} }
if data, err := r.BigData(id, key); err == nil && data != nil { if data, err := r.BigData(id, key); err == nil && data != nil {
@ -628,6 +883,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
return "", ErrDigestUnknown return "", ErrDigestUnknown
} }
// Requires startReading or startWriting.
func (r *containerStore) BigDataNames(id string) ([]string, error) { func (r *containerStore) BigDataNames(id string) ([]string, error) {
c, ok := r.lookup(id) c, ok := r.lookup(id)
if !ok { if !ok {
@ -636,6 +892,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
return copyStringSlice(c.BigDataNames), nil return copyStringSlice(c.BigDataNames), nil
} }
// Requires startWriting.
func (r *containerStore) SetBigData(id, key string, data []byte) error { func (r *containerStore) SetBigData(id, key string, data []byte) error {
if key == "" { if key == "" {
return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName) return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName)
@ -676,12 +933,13 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
save = true save = true
} }
if save { if save {
err = r.Save() err = r.saveFor(c)
} }
} }
return err return err
} }
// Requires startWriting.
func (r *containerStore) Wipe() error { func (r *containerStore) Wipe() error {
ids := make([]string, 0, len(r.byid)) ids := make([]string, 0, len(r.byid))
for id := range r.byid { for id := range r.byid {

View File

@ -251,6 +251,11 @@ func (a *Driver) Exists(id string) bool {
return true return true
} }
// List layers (not including additional image stores)
func (a *Driver) ListLayers() ([]string, error) {
return nil, graphdriver.ErrNotSupported
}
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (a *Driver) AdditionalImageStores() []string { func (a *Driver) AdditionalImageStores() []string {
return nil return nil

View File

@ -6,6 +6,9 @@ package btrfs
/* /*
#include <stdlib.h> #include <stdlib.h>
#include <dirent.h> #include <dirent.h>
// keep struct field name compatible with btrfs-progs < 6.1.
#define max_referenced max_rfer
#include <btrfs/ioctl.h> #include <btrfs/ioctl.h>
#include <btrfs/ctree.h> #include <btrfs/ctree.h>
@ -382,7 +385,7 @@ func subvolLimitQgroup(path string, size uint64) error {
defer closeDir(dir) defer closeDir(dir)
var args C.struct_btrfs_ioctl_qgroup_limit_args var args C.struct_btrfs_ioctl_qgroup_limit_args
args.lim.max_referenced = C.__u64(size) args.lim.max_rfer = C.__u64(size)
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args))) uintptr(unsafe.Pointer(&args)))
@ -676,6 +679,11 @@ func (d *Driver) Exists(id string) bool {
return err == nil return err == nil
} }
// List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) {
return nil, graphdriver.ErrNotSupported
}
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string { func (d *Driver) AdditionalImageStores() []string {
return nil return nil

View File

@ -58,6 +58,11 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
} }
infoOp(m) infoOp(m)
count := m.count count := m.count
if count <= 0 {
// If the mounted path has been decremented enough to have no references,
// then its entry can be removed.
delete(c.counts, path)
}
c.mu.Unlock() c.mu.Unlock()
return count return count
} }

View File

@ -267,6 +267,11 @@ func (d *Driver) Exists(id string) bool {
return d.DeviceSet.HasDevice(id) return d.DeviceSet.HasDevice(id)
} }
// List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) {
return nil, graphdriver.ErrNotSupported
}
// AdditionalImageStores returns additional image stores supported by the driver // AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string { func (d *Driver) AdditionalImageStores() []string {
return nil return nil

View File

@ -109,6 +109,9 @@ type ProtoDriver interface {
// Exists returns whether a filesystem layer with the specified // Exists returns whether a filesystem layer with the specified
// ID exists on this driver. // ID exists on this driver.
Exists(id string) bool Exists(id string) bool
// Returns a list of layer ids that exist on this driver (does not include
// additional storage layers). Not supported by all backends.
ListLayers() ([]string, error)
// Status returns a set of key-value pairs which give low // Status returns a set of key-value pairs which give low
// level diagnostic status about this driver. // level diagnostic status about this driver.
Status() [][2]string Status() [][2]string
@@ -319,6 +322,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) {
type Options struct {
Root string
RunRoot string
+DriverPriority []string
DriverOptions []string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
@@ -334,9 +338,18 @@ func New(name string, config Options) (Driver, error) {
// Guess for prior driver
driversMap := scanPriorDrivers(config.Root)
-for _, name := range priority {
-if name == "vfs" {
-// don't use vfs even if there is state present.
+// use the supplied priority list unless it is empty
+prioList := config.DriverPriority
+if len(prioList) == 0 {
+prioList = priority
+}
+for _, name := range prioList {
+if name == "vfs" && len(config.DriverPriority) == 0 {
+// don't use vfs even if there is state present and vfs
+// has not been explicitly added to the override driver
+// priority list
continue
}
if _, prior := driversMap[name]; prior {
@@ -369,7 +382,7 @@ func New(name string, config Options) (Driver, error) {
}
// Check for priority drivers first
-for _, name := range priority {
+for _, name := range prioList {
driver, err := getBuiltinDriver(name, config.Root, config)
if err != nil {
if isDriverNotSupported(err) {
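Note on this hunk: Options gains a DriverPriority list. When it is non-empty it replaces the built-in priority order, and the automatic "skip vfs" rule only applies when the caller did not supply a list, so naming "vfs" explicitly now makes it eligible. A hedged sketch of how a caller might use it (the paths and the chosen order are illustrative, not defaults):

    package example

    import (
        "fmt"

        graphdriver "github.com/containers/storage/drivers"
    )

    // newPreferredDriver is an illustrative wrapper around graphdriver.New.
    func newPreferredDriver() (graphdriver.Driver, error) {
        opts := graphdriver.Options{
            Root:    "/var/lib/containers/storage",
            RunRoot: "/run/containers/storage",
            // A non-empty DriverPriority overrides the built-in list, and
            // "vfs" is honored here because it was requested explicitly.
            DriverPriority: []string{"overlay", "vfs"},
        }
        // An empty name asks New to pick a driver by priority.
        d, err := graphdriver.New("", opts)
        if err != nil {
            return nil, fmt.Errorf("initializing graph driver: %w", err)
        }
        return d, nil
    }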

View File

@@ -17,6 +17,7 @@ import (
"strings"
"sync"
"syscall"
+"unicode"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/drivers/overlayutils"
@@ -356,9 +357,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if opts.forceMask != nil {
return nil, errors.New("'force_mask' is supported only with 'mount_program'")
}
-// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
+// check if they are running over btrfs, aufs, overlay, or ecryptfs
switch fsMagic {
-case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS)
}
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) {
@@ -1201,6 +1202,9 @@ func (d *Driver) Remove(id string) error {
if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
+if d.quotaCtl != nil {
+d.quotaCtl.ClearQuota(dir)
+}
return nil
}
@@ -1697,6 +1701,40 @@ func (d *Driver) Exists(id string) bool {
return err == nil
}
+func nameLooksLikeID(name string) bool {
+if len(name) != 64 {
+return false
+}
+for _, c := range name {
+if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
+return false
+}
+}
+return true
+}
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+entries, err := os.ReadDir(d.home)
+if err != nil {
+return nil, err
+}
+layers := make([]string, 0)
+for _, entry := range entries {
+id := entry.Name()
+// Does it look like a datadir directory?
+if !entry.IsDir() || !nameLooksLikeID(id) {
+continue
+}
+layers = append(layers, id)
+}
+return layers, err
+}
// isParent returns if the passed in parent is the direct parent of the passed in layer
func (d *Driver) isParent(id, parent string) bool {
lowers, err := d.getLowerDirs(id)

View File

@@ -211,6 +211,12 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
return q.setProjectQuota(projectID, quota)
}
+// ClearQuota removes the map entry in the quotas map for targetPath.
+// It does so to prevent the map leaking entries as directories are deleted.
+func (q *Control) ClearQuota(targetPath string) {
+delete(q.quotas, targetPath)
+}
// setProjectQuota - set the quota for project id on xfs block device
func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
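Note on this hunk: ClearQuota only deletes the in-memory quotas entry for a path; the overlay driver calls it from Remove (see the overlay hunk above) so the controller does not accumulate one stale entry per deleted layer directory. A rough sketch of the pairing, assuming quota.NewControl and the Quota.Size field behave as in the rest of the quota package (both are assumptions here, not shown in this diff):

    package example

    import "github.com/containers/storage/drivers/quota"

    // removeWithQuota is illustrative only; paths and the 1 GiB limit are made up.
    func removeWithQuota(home, dir string) error {
        ctl, err := quota.NewControl(home) // assumed constructor from the quota package
        if err != nil {
            return err
        }
        if err := ctl.SetQuota(dir, quota.Quota{Size: 1 << 30}); err != nil {
            return err
        }
        // ... populate and later delete dir ...
        // ClearQuota only removes the map entry, so call it when the directory
        // itself is removed to keep the controller's bookkeeping bounded.
        ctl.ClearQuota(dir)
        return nil
    }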

View File

@@ -8,6 +8,7 @@ import (
"runtime"
"strconv"
"strings"
+"unicode"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
@@ -265,6 +266,40 @@ func (d *Driver) Exists(id string) bool {
return err == nil
}
+func nameLooksLikeID(name string) bool {
+if len(name) != 64 {
+return false
+}
+for _, c := range name {
+if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
+return false
+}
+}
+return true
+}
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+entries, err := os.ReadDir(d.homes[0])
+if err != nil {
+return nil, err
+}
+layers := make([]string, 0)
+for _, entry := range entries {
+id := entry.Name()
+// Does it look like a datadir directory?
+if !entry.IsDir() || !nameLooksLikeID(id) {
+continue
+}
+layers = append(layers, id)
+}
+return layers, err
+}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
if len(d.homes) > 1 {

View File

@@ -185,6 +185,11 @@ func (d *Driver) Exists(id string) bool {
return result
}
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+return nil, graphdriver.ErrNotSupported
+}
// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)

View File

@@ -57,12 +57,12 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites)
}
-file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600)
if err != nil {
logger.Debugf("cannot open /dev/zfs: %v", err)
return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites)
}
-defer file.Close()
+defer unix.Close(file)
options, err := parseOptions(opt.DriverOptions)
if err != nil {
@@ -506,6 +506,11 @@ func (d *Driver) Exists(id string) bool {
return d.filesystemsCache[d.zfsPath(id)]
}
+// List layers (not including additional image stores)
+func (d *Driver) ListLayers() ([]string, error) {
+return nil, graphdriver.ErrNotSupported
+}
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil

View File

@@ -1,11 +1,11 @@
package storage
import (
-"errors"
"fmt"
"strings"
"github.com/containers/storage/pkg/idtools"
+"github.com/containers/storage/types"
"github.com/google/go-intervals/intervalset"
)
@@ -116,7 +116,7 @@ func (s *idSet) findAvailable(n int) (*idSet, error) {
n -= i.length()
}
if n > 0 {
-return nil, errors.New("could not find enough available IDs")
+return nil, types.ErrNoAvailableIDs
}
return &idSet{set: intervalset.NewImmutableSet(intervals)}, nil
}

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/containers/storage/pkg/ioutils"
+"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/truncindex"
@@ -148,19 +149,28 @@ type rwImageStore interface {
// Delete removes the record of the image.
Delete(id string) error
+addMappedTopLayer(id, layer string) error
+removeMappedTopLayer(id, layer string) error
// Wipe removes records of all images.
Wipe() error
}
type imageStore struct {
-lockfile Locker // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
+// The following fields are only set when constructing imageStore, and must never be modified afterwards.
+// They are safe to access without any other locking.
+lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
dir string
-images []*Image
-idindex *truncindex.TruncIndex
-byid map[string]*Image
-byname map[string]*Image
-bydigest map[digest.Digest][]*Image
-loadMut sync.Mutex
+inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
+// The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
+// Almost all users should use startReading() or startWriting().
+lastWrite lockfile.LastWrite
+images []*Image
+idindex *truncindex.TruncIndex
+byid map[string]*Image
+byname map[string]*Image
+bydigest map[digest.Digest][]*Image
}
func copyImage(i *Image) *Image {
@@ -200,15 +210,17 @@ func copyImageSlice(slice []*Image) []*Image {
// should use startReading() instead.
func (r *imageStore) startWritingWithReload(canReload bool) error {
r.lockfile.Lock()
+r.inProcessLock.Lock()
succeeded := false
defer func() {
if !succeeded {
+r.inProcessLock.Unlock()
r.lockfile.Unlock()
}
}()
if canReload {
-if err := r.reloadIfChanged(true); err != nil {
+if _, err := r.reloadIfChanged(true); err != nil {
return err
}
}
@@ -225,6 +237,7 @@ func (r *imageStore) startWriting() error {
// stopWriting releases locks obtained by startWriting.
func (r *imageStore) stopWriting() {
+r.inProcessLock.Unlock()
r.lockfile.Unlock()
}
@@ -234,21 +247,94 @@ func (r *imageStore) stopWriting() {
// This is an internal implementation detail of imageStore construction, every other caller
// should use startReading() instead.
func (r *imageStore) startReadingWithReload(canReload bool) error {
+// inProcessLocked calls the nested function with r.inProcessLock held for writing.
+inProcessLocked := func(fn func() error) error {
+r.inProcessLock.Lock()
+defer r.inProcessLock.Unlock()
+return fn()
+}
r.lockfile.RLock()
-succeeded := false
+unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
defer func() {
-if !succeeded {
-r.lockfile.Unlock()
+if unlockFn != nil {
+unlockFn()
}
}()
+r.inProcessLock.RLock()
+unlockFn = r.stopReading
if canReload {
-if err := r.reloadIfChanged(false); err != nil {
+// If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
+_, modified, err := r.modified()
+if err != nil {
return err
}
+if modified {
+// We are unlucky, and need to reload.
+// NOTE: Multiple goroutines can get to this place approximately simultaneously.
+r.inProcessLock.RUnlock()
+unlockFn = r.lockfile.Unlock
+// r.lastWrite can change at this point if another goroutine reloads the store before us. That's why we don't unconditionally
+// trigger a load below; we (lock and) reloadIfChanged() again.
+// First try reloading with r.lockfile held for reading.
+// r.inProcessLock will serialize all goroutines that got here;
+// each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
+var tryLockedForWriting bool
+if err := inProcessLocked(func() error {
+// We could optimize this further: The r.lockfile.GetLastWrite() value shouldn't change as long as we hold r.lockfile,
+// so if r.lastWrite was already updated, we don't need to actually read the on-filesystem lock.
+var err error
+tryLockedForWriting, err = r.reloadIfChanged(false)
+return err
+}); err != nil {
+if !tryLockedForWriting {
+return err
+}
+// Not good enough, we need r.lockfile held for writing. So, let's do that.
+unlockFn()
+unlockFn = nil
+r.lockfile.Lock()
+unlockFn = r.lockfile.Unlock
+if err := inProcessLocked(func() error {
+_, err := r.reloadIfChanged(true)
+return err
+}); err != nil {
+return err
+}
+unlockFn()
+unlockFn = nil
+r.lockfile.RLock()
+unlockFn = r.lockfile.Unlock
+// We need to check for a reload once more because the on-disk state could have been modified
+// after we released the lock.
+// If that, _again_, finds inconsistent state, just give up.
+// We could, plausibly, retry a few times, but that inconsistent state (duplicate image names)
+// shouldn't be saved (by correct implementations) in the first place.
+if err := inProcessLocked(func() error {
+_, err := r.reloadIfChanged(false)
+return err
+}); err != nil {
+return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
+}
+}
+// NOTE that we hold neither a read nor write inProcessLock at this point. That's fine in ordinary operation, because
+// the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
+// protects us against in-process writers modifying data.
+// In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
+// and 2) access to the in-memory data is not racy;
+// but we can't protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.
+r.inProcessLock.RLock()
+}
}
-succeeded = true
+unlockFn = nil
return nil
}
@@ -260,24 +346,48 @@ func (r *imageStore) startReading() error {
// stopReading releases locks obtained by startReading.
func (r *imageStore) stopReading() {
+r.inProcessLock.RUnlock()
r.lockfile.Unlock()
}
+// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store),
+// and a lockfile.LastWrite value for that update.
+//
+// The caller must hold r.lockfile for reading _or_ writing.
+// The caller must hold r.inProcessLock for reading or writing.
+func (r *imageStore) modified() (lockfile.LastWrite, bool, error) {
+return r.lockfile.ModifiedSince(r.lastWrite)
+}
// reloadIfChanged reloads the contents of the store from disk if it is changed.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
-func (r *imageStore) reloadIfChanged(lockedForWriting bool) error {
-r.loadMut.Lock()
-defer r.loadMut.Unlock()
-modified, err := r.lockfile.Modified()
-if err == nil && modified {
-return r.load(lockedForWriting)
+//
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// reloadIfChanged() with lockedForWriting could succeed.
+func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
+lastWrite, modified, err := r.modified()
+if err != nil {
+return false, err
}
-return err
+// We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
+// and modify no fields, to ensure they see fresh data:
+// r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
+// one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
+// see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
+if modified {
+if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
+return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
+}
+r.lastWrite = lastWrite
+}
+return false, nil
}
+// Requires startReading or startWriting.
func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
@@ -308,6 +418,7 @@ func bigDataNameIsManifest(name string) bool {
// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
// list of the unique values that would identify the image.
+// The caller must hold r.inProcessLock for writing.
func (i *Image) recomputeDigests() error {
validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1)
digests := make(map[digest.Digest]struct{})
@@ -340,38 +451,45 @@ func (i *Image) recomputeDigests() error {
// load reloads the contents of the store from disk.
//
+// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
+// manage r.lastWrite.
+//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
-func (r *imageStore) load(lockedForWriting bool) error {
-shouldSave := false
+// The caller must hold r.inProcessLock for WRITING.
+//
+// If !lockedForWriting and this function fails, the return value indicates whether
+// retrying with lockedForWriting could succeed.
+func (r *imageStore) load(lockedForWriting bool) (bool, error) {
rpath := r.imagespath()
data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
-return err
+return false, err
}
images := []*Image{}
if len(data) != 0 {
if err := json.Unmarshal(data, &images); err != nil {
-return fmt.Errorf("loading %q: %w", rpath, err)
+return false, fmt.Errorf("loading %q: %w", rpath, err)
}
}
idlist := make([]string, 0, len(images))
ids := make(map[string]*Image)
names := make(map[string]*Image)
digests := make(map[digest.Digest][]*Image)
+var errorToResolveBySaving error // == nil
for n, image := range images {
ids[image.ID] = images[n]
idlist = append(idlist, image.ID)
for _, name := range image.Names {
if conflict, ok := names[name]; ok {
r.removeName(conflict, name)
-shouldSave = true
+errorToResolveBySaving = ErrDuplicateImageNames
}
}
// Compute the digest list.
if err := image.recomputeDigests(); err != nil {
-return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
+return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
}
for _, name := range image.Names {
names[name] = image
@@ -383,23 +501,28 @@ func (r *imageStore) load(lockedForWriting bool) error {
image.ReadOnly = !r.lockfile.IsReadWrite()
}
-if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
-// Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
-return ErrDuplicateImageNames
+if errorToResolveBySaving != nil {
+if !r.lockfile.IsReadWrite() {
+return false, errorToResolveBySaving
+}
+if !lockedForWriting {
+return true, errorToResolveBySaving
+}
}
r.images = images
r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
r.byid = ids
r.byname = names
r.bydigest = digests
-if shouldSave {
-return r.Save()
+if errorToResolveBySaving != nil {
+return false, r.Save()
}
-return nil
+return false, nil
}
-// Save saves the contents of the store to disk. It should be called with
-// the lock held, locked for writing.
+// Save saves the contents of the store to disk.
+// The caller must hold r.lockfile locked for writing.
+// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *imageStore) Save() error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
@@ -416,20 +539,26 @@ func (r *imageStore) Save() error {
if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
return err
}
-return r.lockfile.Touch()
+lw, err := r.lockfile.RecordWrite()
+if err != nil {
+return err
+}
+r.lastWrite = lw
+return nil
}
func newImageStore(dir string) (rwImageStore, error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
-lockfile, err := GetLockfile(filepath.Join(dir, "images.lock"))
+lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
istore := imageStore{
lockfile: lockfile,
dir: dir,
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
@@ -439,20 +568,25 @@ func newImageStore(dir string) (rwImageStore, error) {
return nil, err
}
defer istore.stopWriting()
-if err := istore.load(true); err != nil {
+istore.lastWrite, err = istore.lockfile.GetLastWrite()
+if err != nil {
+return nil, err
+}
+if _, err := istore.load(true); err != nil {
return nil, err
}
return &istore, nil
}
func newROImageStore(dir string) (roImageStore, error) {
-lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
+lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock"))
if err != nil {
return nil, err
}
istore := imageStore{
lockfile: lockfile,
dir: dir,
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
@@ -462,12 +596,17 @@ func newROImageStore(dir string) (roImageStore, error) {
return nil, err
}
defer istore.stopReading()
-if err := istore.load(false); err != nil {
+istore.lastWrite, err = istore.lockfile.GetLastWrite()
+if err != nil {
+return nil, err
+}
+if _, err := istore.load(false); err != nil {
return nil, err
}
return &istore, nil
}
+// Requires startReading or startWriting.
func (r *imageStore) lookup(id string) (*Image, bool) {
if image, ok := r.byid[id]; ok {
return image, ok
@@ -480,6 +619,7 @@ func (r *imageStore) lookup(id string) (*Image, bool) {
return nil, false
}
+// Requires startWriting.
func (r *imageStore) ClearFlag(id string, flag string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
@@ -492,6 +632,7 @@ func (r *imageStore) ClearFlag(id string, flag string) error {
return r.Save()
}
+// Requires startWriting.
func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
@@ -507,6 +648,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
return r.Save()
}
+// Requires startWriting.
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
if !r.lockfile.IsReadWrite() {
return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
@@ -566,6 +708,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
return image, err
}
+// Requires startWriting.
func (r *imageStore) addMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
image.MappedTopLayers = append(image.MappedTopLayers, layer)
@@ -574,6 +717,7 @@ func (r *imageStore) addMappedTopLayer(id, layer string) error {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startWriting.
func (r *imageStore) removeMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
initialLen := len(image.MappedTopLayers)
@@ -587,6 +731,7 @@ func (r *imageStore) removeMappedTopLayer(id, layer string) error {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startReading or startWriting.
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
@@ -594,6 +739,7 @@ func (r *imageStore) Metadata(id string) (string, error) {
return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startWriting.
func (r *imageStore) SetMetadata(id, metadata string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
@@ -605,14 +751,17 @@ func (r *imageStore) SetMetadata(id, metadata string) error {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// The caller must hold r.inProcessLock for writing.
func (r *imageStore) removeName(image *Image, name string) {
image.Names = stringSliceWithoutValue(image.Names, name)
}
+// The caller must hold r.inProcessLock for writing.
func (i *Image) addNameToHistory(name string) {
i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...))
}
+// Requires startWriting.
func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
@@ -640,6 +789,7 @@ func (r *imageStore) updateNames(id string, names []string, op updateNameOperati
return r.Save()
}
+// Requires startWriting.
func (r *imageStore) Delete(id string) error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
@@ -687,6 +837,7 @@ func (r *imageStore) Delete(id string) error {
return nil
}
+// Requires startReading or startWriting.
func (r *imageStore) Get(id string) (*Image, error) {
if image, ok := r.lookup(id); ok {
return copyImage(image), nil
@@ -694,11 +845,13 @@ func (r *imageStore) Get(id string) (*Image, error) {
return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
+// Requires startReading or startWriting.
func (r *imageStore) Exists(id string) bool {
_, ok := r.lookup(id)
return ok
}
+// Requires startReading or startWriting.
func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return copyImageSlice(images), nil
@@ -706,6 +859,7 @@ func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown)
}
+// Requires startReading or startWriting.
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName)
@@ -717,6 +871,7 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) {
return os.ReadFile(r.datapath(image.ID, key))
}
+// Requires startReading or startWriting.
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName)
@@ -725,10 +880,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if !ok {
return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
-if image.BigDataSizes == nil {
-image.BigDataSizes = make(map[string]int64)
-}
-if size, ok := image.BigDataSizes[key]; ok {
+if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
@@ -737,6 +889,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
return -1, ErrSizeUnknown
}
+// Requires startReading or startWriting.
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName)
@@ -745,15 +898,13 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if !ok {
return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
-if image.BigDataDigests == nil {
-image.BigDataDigests = make(map[string]digest.Digest)
-}
-if d, ok := image.BigDataDigests[key]; ok {
+if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
return d, nil
}
return "", ErrDigestUnknown
}
+// Requires startReading or startWriting.
func (r *imageStore) BigDataNames(id string) ([]string, error) {
image, ok := r.lookup(id)
if !ok {
@@ -773,6 +924,7 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
return modified
}
+// Requires startWriting.
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if key == "" {
return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
@@ -857,6 +1009,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
return err
}
+// Requires startWriting.
func (r *imageStore) Wipe() error {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)

File diff suppressed because it is too large

View File

@@ -4,12 +4,15 @@ import (
"github.com/containers/storage/pkg/lockfile"
)
-type Locker = lockfile.Locker
+// Deprecated: Use lockfile.*LockFile.
+type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated
+// Deprecated: Use lockfile.GetLockFile.
func GetLockfile(path string) (lockfile.Locker, error) {
return lockfile.GetLockfile(path)
}
+// Deprecated: Use lockfile.GetROLockFile.
func GetROLockfile(path string) (lockfile.Locker, error) {
return lockfile.GetROLockfile(path)
}

View File

@@ -43,7 +43,12 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) {
root := newRootFileInfo(idMappings)
-err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
+sourceStat, err := system.Lstat(sourceDir)
+if err != nil {
+return nil, err
+}
+err = filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
@@ -86,8 +91,12 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
if err != nil {
return err
}
-info.stat = s
+if s.Dev() != sourceStat.Dev() {
+return filepath.SkipDir
+}
+info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
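Note on this hunk: collectFileInfo now stats the source directory first and skips any subtree whose device differs, so content on other filesystems (for example bind mounts under a layer) is no longer walked into the change list. A hedged, Linux-only sketch of the same device check outside of this package (the helper and callback names are made up):

    package example

    import (
        "io/fs"
        "path/filepath"
        "syscall"
    )

    // walkSameDevice walks root but refuses to descend into mount points.
    func walkSameDevice(root string, visit func(path string) error) error {
        var rootStat syscall.Stat_t
        if err := syscall.Lstat(root, &rootStat); err != nil {
            return err
        }
        return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
            if err != nil {
                return err
            }
            var st syscall.Stat_t
            if err := syscall.Lstat(path, &st); err != nil {
                return err
            }
            if st.Dev != rootStat.Dev {
                // A different device means we crossed a mount boundary.
                return filepath.SkipDir
            }
            return visit(path)
        })
    }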

View File

@@ -2,11 +2,12 @@ package idtools
import (
"fmt"
-"regexp"
"sort"
"strconv"
"strings"
"sync"
+"github.com/containers/storage/pkg/regexp"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
@@ -24,7 +25,7 @@ var (
"usermod": "-%s %d-%d %s",
}
-idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`)
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
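Note on this hunk: the package-level pattern is now built with regexp.Delayed from the new pkg/regexp helper instead of regexp.MustCompile, which defers compiling the pattern until it is first used so importing idtools stays cheap. A sketch of the general lazy-compilation idea in plain Go (this is not the pkg/regexp implementation):

    package example

    import (
        "regexp"
        "sync"
    )

    // lazyRegexp compiles its pattern on first use and then caches it.
    type lazyRegexp struct {
        pattern string
        once    sync.Once
        re      *regexp.Regexp
    }

    func (l *lazyRegexp) get() *regexp.Regexp {
        l.once.Do(func() { l.re = regexp.MustCompile(l.pattern) })
        return l.re
    }

    var idOut = &lazyRegexp{pattern: `uid=([0-9]+).*gid=([0-9]+)`}

    func parseIDs(out string) []string {
        return idOut.get().FindStringSubmatch(out)
    }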

View File

@@ -4,6 +4,7 @@ import (
"io"
"os"
"path/filepath"
+"time"
)
// AtomicFileWriterOptions specifies options for creating the atomic file writer.
@@ -13,6 +14,9 @@ type AtomicFileWriterOptions struct {
// storage after it has been written and before it is moved to
// the specified path.
NoSync bool
+// On successful return from Close() this is set to the mtime of the
+// newly written file.
+ModTime time.Time
}
var defaultWriterOptions = AtomicFileWriterOptions{}
@@ -61,8 +65,8 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err
}
// AtomicWriteFile atomically writes data to a file named by filename.
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
-f, err := newAtomicFileWriter(filename, perm, nil)
+func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error {
+f, err := newAtomicFileWriter(filename, perm, opts)
if err != nil {
return err
}
@@ -74,15 +78,25 @@ func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
if err1 := f.Close(); err == nil {
err = err1
}
+if opts != nil {
+opts.ModTime = f.modTime
+}
return err
}
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+return AtomicWriteFileWithOpts(filename, data, perm, nil)
+}
type atomicFileWriter struct {
f *os.File
fn string
writeErr error
perm os.FileMode
noSync bool
+modTime time.Time
}
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
@@ -105,9 +119,25 @@ func (w *atomicFileWriter) Close() (retErr error) {
return err
}
}
+// fstat before closing the fd
+info, statErr := w.f.Stat()
+if statErr == nil {
+w.modTime = info.ModTime()
+}
+// We delay error reporting until after the real call to close()
+// to match the traditional linux close() behaviour that an fd
+// is invalid (closed) even if close returns failure. While
+// weird, this allows a well defined way to not leak open fds.
if err := w.f.Close(); err != nil {
return err
}
+if statErr != nil {
+return statErr
+}
if err := os.Chmod(w.f.Name(), w.perm); err != nil {
return err
}
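Note on this hunk: AtomicWriteFile is now a thin wrapper over AtomicWriteFileWithOpts, and on success the options struct carries the mtime of the file that was committed (captured via fstat just before the temporary file is closed and renamed). A short usage sketch, assuming only the API shown above (the path and payload are illustrative):

    package example

    import (
        "fmt"

        "github.com/containers/storage/pkg/ioutils"
    )

    func writeAndRemember(path string, data []byte) error {
        opts := ioutils.AtomicFileWriterOptions{}
        if err := ioutils.AtomicWriteFileWithOpts(path, data, 0o600, &opts); err != nil {
            return err
        }
        // ModTime is the mtime of the file that was atomically moved into place;
        // a caller can cache it to notice later out-of-band modifications.
        fmt.Println("written at", opts.ModTime)
        return nil
    }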

View File

@@ -10,6 +10,8 @@ import (
// A Locker represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
+//
+// Deprecated: Refer directly to *LockFile, the provided implementation, instead.
type Locker interface {
// Acquire a writer lock.
// The default unix implementation panics if:
@@ -28,10 +30,13 @@ type Locker interface {
// Touch records, for others sharing the lock, that the caller was the
// last writer. It should only be called with the lock held.
+//
+// Deprecated: Use *LockFile.RecordWrite.
Touch() error
// Modified() checks if the most recent writer was a party other than the
// last recorded writer. It should only be called with the lock held.
+// Deprecated: Use *LockFile.ModifiedSince.
Modified() (bool, error)
// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
@@ -44,64 +49,82 @@ type Locker interface {
// It might do nothing at all, or it may panic if the caller is not the owner of this lock.
AssertLocked()
-// AssertLocked() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking.
+// AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking.
// It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing.
AssertLockedForWriting()
}
var (
-lockfiles map[string]Locker
-lockfilesLock sync.Mutex
+lockFiles map[string]*LockFile
+lockFilesLock sync.Mutex
)
+// GetLockFile opens a read-write lock file, creating it if necessary. The
+// *LockFile object may already be locked if the path has already been requested
+// by the current process.
+func GetLockFile(path string) (*LockFile, error) {
+return getLockfile(path, false)
+}
// GetLockfile opens a read-write lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
+//
+// Deprecated: Use GetLockFile
func GetLockfile(path string) (Locker, error) {
-return getLockfile(path, false)
+return GetLockFile(path)
}
+// GetROLockFile opens a read-only lock file, creating it if necessary. The
+// *LockFile object may already be locked if the path has already been requested
+// by the current process.
+func GetROLockFile(path string) (*LockFile, error) {
+return getLockfile(path, true)
+}
// GetROLockfile opens a read-only lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
+//
+// Deprecated: Use GetROLockFile
func GetROLockfile(path string) (Locker, error) {
-return getLockfile(path, true)
+return GetROLockFile(path)
}
-// getLockfile returns a Locker object, possibly (depending on the platform)
+// getLockFile returns a *LockFile object, possibly (depending on the platform)
// working inter-process, and associated with the specified path.
//
-// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
-// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
-func getLockfile(path string, ro bool) (Locker, error) {
-lockfilesLock.Lock()
-defer lockfilesLock.Unlock()
-if lockfiles == nil {
-lockfiles = make(map[string]Locker)
+func getLockfile(path string, ro bool) (*LockFile, error) {
+lockFilesLock.Lock()
+defer lockFilesLock.Unlock()
+if lockFiles == nil {
+lockFiles = make(map[string]*LockFile)
}
cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
}
-if locker, ok := lockfiles[cleanPath]; ok {
-if ro && locker.IsReadWrite() {
+if lockFile, ok := lockFiles[cleanPath]; ok {
+if ro && lockFile.IsReadWrite() {
return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
}
-if !ro && !locker.IsReadWrite() {
+if !ro && !lockFile.IsReadWrite() {
return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
}
-return locker, nil
+return lockFile, nil
}
-locker, err := createLockerForPath(cleanPath, ro) // platform-dependent locker
+lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile
if err != nil {
return nil, err
}
-lockfiles[cleanPath] = locker
-return locker, nil
+lockFiles[cleanPath] = lockFile
+return lockFile, nil
}
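Note on this file: the package now exports the concrete *LockFile type; GetLockFile/GetROLockFile supersede the Locker constructors, and the Touch()/Modified() pair is replaced by RecordWrite()/ModifiedSince() driven by an explicit LastWrite token, which is exactly how the images.go changes above consume it. A hedged end-to-end sketch (the lock path is illustrative and error handling is abbreviated):

    package example

    import "github.com/containers/storage/pkg/lockfile"

    func watchLock(path string) error {
        lf, err := lockfile.GetLockFile(path)
        if err != nil {
            return err
        }

        // Writer side: mutate the protected state, then record the write.
        lf.Lock()
        _, err = lf.RecordWrite()
        lf.Unlock()
        if err != nil {
            return err
        }

        // Reader side: remember the last LastWrite seen and ask whether anything
        // changed since then, instead of calling the deprecated Modified().
        lf.RLock()
        defer lf.Unlock()
        last, err := lf.GetLastWrite()
        if err != nil {
            return err
        }
        if newer, modified, err := lf.ModifiedSince(last); err == nil && modified {
            last = newer // reload whatever the lock protects, then keep the token
            _ = last
        }
        return nil
    }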

View File

@@ -18,27 +18,48 @@ import (
"golang.org/x/sys/unix"
)
-type lockfile struct {
+// *LockFile represents a file lock where the file is used to cache an
+// identifier of the last party that made changes to whatever's being protected
+// by the lock.
+//
+// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
+type LockFile struct {
+// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
+// They are safe to access without any other locking.
+file string
+ro bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
-file string
-fd uintptr
-lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID()
+lw LastWrite // A global value valid as of the last .Touch() or .Modified()
locktype int16
locked bool
-ro bool
+// The following fields are only modified on transitions between counter == 0 / counter != 0.
+// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
+// In other cases, they need to be protected using stateMutex.
+fd uintptr
+}
+// LastWrite is an opaque identifier of the last write to some *LockFile.
+// It can be used by users of a *LockFile to determine if the lock indicates changes
+// since the last check.
+//
+// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
+type LastWrite struct {
+// Never modify fields of a LastWrite object; it has value semantics.
+state []byte // Contents of the lock file.
}
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
var lastWriterIDCounter uint64 // Private state for newLastWriterID
-// newLastWriterID returns a new "last writer" ID.
+// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
-func newLastWriterID() []byte {
+func newLastWrite() LastWrite {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
@@ -60,7 +81,38 @@ func newLastWriterID() []byte {
panic(err) // This shouldn't happen
}
-return res
+return LastWrite{
+state: res,
+}
+}
+// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
+func newLastWriteFromData(serialized []byte) LastWrite {
+if serialized == nil {
+panic("newLastWriteFromData with nil data")
+}
+return LastWrite{
+state: serialized,
+}
+}
+// serialize returns bytes to write to the lock file to represent the specified write.
+func (lw LastWrite) serialize() []byte {
+if lw.state == nil {
+panic("LastWrite.serialize on an uninitialized object")
+}
+return lw.state
+}
+// Equals returns true if lw matches other
+func (lw LastWrite) equals(other LastWrite) bool {
+if lw.state == nil {
+panic("LastWrite.equals on an uninitialized object")
+}
+if other.state == nil {
+panic("LastWrite.equals with an uninitialized counterparty")
+}
+return bytes.Equal(lw.state, other.state)
}
// openLock opens the file at path and returns the corresponding file
@@ -84,7 +136,7 @@ func openLock(path string, ro bool) (fd int, err error) {
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
-return fd, fmt.Errorf("creating locker directory: %w", err)
+return fd, fmt.Errorf("creating lock file directory: %w", err)
}
return openLock(path, ro)
@@ -93,20 +145,20 @@ func openLock(path string, ro bool) (fd int, err error) {
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
-// createLockerForPath returns a Locker object, possibly (depending on the platform)
+// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
-// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
-// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
-func createLockerForPath(path string, ro bool) (Locker, error) {
+func createLockFileForPath(path string, ro bool) (*LockFile, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
@@ -118,19 +170,21 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
if ro {
locktype = unix.F_RDLCK
}
-return &lockfile{
-stateMutex: &sync.Mutex{},
+return &LockFile{
+file: path,
+ro: ro,
rwMutex: &sync.RWMutex{},
-file: path,
-lw: newLastWriterID(),
+stateMutex: &sync.Mutex{},
+lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
locktype: int16(locktype),
locked: false,
-ro: ro}, nil
+}, nil
}
// lock locks the lockfile via FCTNL(2) based on the specified type and
// command.
-func (l *lockfile) lock(lType int16) {
+func (l *LockFile) lock(lType int16) {
lk := unix.Flock_t{
Type: lType,
Whence: int16(unix.SEEK_SET),
@@ -168,7 +222,7 @@ func (l *lockfile) lock(lType int16) {
}
// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
-func (l *lockfile) Lock() {
+func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
@@ -177,12 +231,12 @@ func (l *lockfile) Lock() {
}
// LockRead locks the lockfile as a reader.
-func (l *lockfile) RLock() {
+func (l *LockFile) RLock() {
l.lock(unix.F_RDLCK)
}
// Unlock unlocks the lockfile.
-func (l *lockfile) Unlock() {
+func (l *LockFile) Unlock() {
l.stateMutex.Lock()
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
@@ -213,7 +267,7 @@ func (l *lockfile) Unlock() {
l.stateMutex.Unlock()
}
-func (l *lockfile) AssertLocked() {
+func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
@@ -230,7 +284,7 @@ func (l *lockfile) AssertLocked() {
}
}
-func (l *lockfile) AssertLockedForWriting() {
+func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
@@ -242,53 +296,128 @@ func (l *lockfile) AssertLockedForWriting() {
}
}
-// Touch updates the lock file with the UID of the user.
-func (l *lockfile) Touch() error {
+// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
+// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) GetLastWrite() (LastWrite, error) {
l.AssertLocked()
contents := make([]byte, lastWriterIDSize)
n, err := unix.Pread(int(l.fd), contents, 0)
if err != nil {
return LastWrite{}, err
}
// It is important to handle the partial read case, because
// the initial size of the lock file is zero, which is a valid
// state (no writes yet)
contents = contents[:n]
return newLastWriteFromData(contents), nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should keep using the previously-recorded LastWrite value,
// possibly detecting its own modification as an external one:
//
// lw, err := state.lock.RecordWrite()
// if err != nil { /* fail */ }
// state.lastWrite = lw
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
l.AssertLockedForWriting()
lw := newLastWrite()
lockContents := lw.serialize()
n, err := unix.Pwrite(int(l.fd), lockContents, 0)
if err != nil {
return LastWrite{}, err
}
if n != len(lockContents) {
return LastWrite{}, unix.ENOSPC
}
return lw, nil
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
l.AssertLocked()
currentLW, err := l.GetLastWrite()
if err != nil {
return LastWrite{}, false, err
}
modified := !previous.equals(currentLW)
return currentLW, modified, nil
}
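Not part of the diff: a minimal consumer sketch of the new API, assuming the package's GetLockFile constructor (mentioned in the Windows file below); the state type and reloadIfChanged helper are hypothetical names used only for illustration.
package main
import (
	"fmt"
	"github.com/containers/storage/pkg/lockfile"
)
// state is a hypothetical consumer of some lock-protected data.
type state struct {
	lock      *lockfile.LockFile
	lastWrite lockfile.LastWrite
}
// reloadIfChanged re-reads the protected data only if another lock holder
// recorded a write since our last known LastWrite.
func (s *state) reloadIfChanged() error {
	s.lock.RLock()
	defer s.lock.Unlock()
	lw, modified, err := s.lock.ModifiedSince(s.lastWrite)
	if err != nil {
		return err
	}
	if modified {
		// ... reload the protected data here, before recording lw ...
	}
	s.lastWrite = lw
	return nil
}
func main() {
	l, err := lockfile.GetLockFile("/tmp/example.lock")
	if err != nil {
		panic(err)
	}
	s := &state{lock: l}
	l.RLock()
	s.lastWrite, err = l.GetLastWrite() // initial baseline, taken under the lock
	l.Unlock()
	if err != nil {
		panic(err)
	}
	fmt.Println(s.reloadIfChanged())
}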
// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
lw, err := l.RecordWrite()
if err != nil {
return err
}
l.stateMutex.Lock() l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) { if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock") panic("attempted to update last-writer in lockfile without the write lock")
} }
defer l.stateMutex.Unlock() defer l.stateMutex.Unlock()
l.lw = newLastWriterID() l.lw = lw
n, err := unix.Pwrite(int(l.fd), l.lw, 0)
if err != nil {
return err
}
if n != len(l.lw) {
return unix.ENOSPC
}
return nil return nil
} }
// Modified indicates if the lockfile has been updated since the last time it // Modified indicates if the lockfile has been updated since the last time it
// was loaded. // was loaded.
func (l *lockfile) Modified() (bool, error) { // NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
// Callers cannot, in general, rely on this, because that might have happened for some other
// owner of the same *LockFile who created it previously.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
l.stateMutex.Lock() l.stateMutex.Lock()
if !l.locked { if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first") panic("attempted to check last-writer in lockfile without locking it first")
} }
defer l.stateMutex.Unlock() defer l.stateMutex.Unlock()
currentLW := make([]byte, lastWriterIDSize) oldLW := l.lw
n, err := unix.Pread(int(l.fd), currentLW, 0) // Note that this is called with stateMutex held; that's fine because ModifiedSince doesn't need to lock it.
currentLW, modified, err := l.ModifiedSince(oldLW)
if err != nil { if err != nil {
return true, err return true, err
} }
// It is important to handle the partial read case, because
// the initial size of the lock file is zero, which is a valid
// state (no writes yet)
currentLW = currentLW[:n]
oldLW := l.lw
l.lw = currentLW l.lw = currentLW
return !bytes.Equal(currentLW, oldLW), nil return modified, nil
} }
// IsReadWriteLock indicates if the lock file is a read-write lock. // IsReadWriteLock indicates if the lock file is a read-write lock.
func (l *lockfile) IsReadWrite() bool { func (l *LockFile) IsReadWrite() bool {
return !l.ro return !l.ro
} }
// TouchedSince indicates if the lock file has been touched since the specified time // TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool { func (l *LockFile) TouchedSince(when time.Time) bool {
st, err := system.Fstat(int(l.fd)) st, err := system.Fstat(int(l.fd))
if err != nil { if err != nil {
return true return true

View File

@ -9,45 +9,58 @@ import (
"time" "time"
) )
// createLockerForPath returns a Locker object, possibly (depending on the platform) // createLockFileForPath returns a *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path. // working inter-process and associated with the specified path.
// //
// This function will be called at most once for each path value within a single process. // This function will be called at most once for each path value within a single process.
// //
// If ro, the lock is a read-write lock and the returned Locker should correspond to the // If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, // “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. // or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
// //
// WARNING: // WARNING:
// - The lock may or MAY NOT be inter-process. // - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path. // - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive. // - Even if ro, the lock MAY be exclusive.
func createLockerForPath(path string, ro bool) (Locker, error) { func createLockFileForPath(path string, ro bool) (*LockFile, error) {
return &lockfile{locked: false}, nil return &LockFile{locked: false}, nil
} }
type lockfile struct { // *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
mu sync.Mutex mu sync.Mutex
file string file string
locked bool locked bool
} }
func (l *lockfile) Lock() { // LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes.
type LastWrite struct {
// Nothing: The Windows “implementation” does not actually track writes.
}
func (l *LockFile) Lock() {
l.mu.Lock() l.mu.Lock()
l.locked = true l.locked = true
} }
func (l *lockfile) RLock() { func (l *LockFile) RLock() {
l.mu.Lock() l.mu.Lock()
l.locked = true l.locked = true
} }
func (l *lockfile) Unlock() { func (l *LockFile) Unlock() {
l.locked = false l.locked = false
l.mu.Unlock() l.mu.Unlock()
} }
func (l *lockfile) AssertLocked() { func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked. // DO NOT provide a variant that returns the value of l.locked.
// //
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
@ -59,24 +72,77 @@ func (l *lockfile) AssertLocked() {
} }
} }
func (l *lockfile) AssertLockedForWriting() { func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state. // DO NOT provide a variant that returns the current lock state.
// //
// The same caveats as for AssertLocked apply equally. // The same caveats as for AssertLocked apply equally.
l.AssertLocked() // The current implementation does not distinguish between read and write locks. l.AssertLocked() // The current implementation does not distinguish between read and write locks.
} }
func (l *lockfile) Modified() (bool, error) { // GetLastWrite() returns a LastWrite value corresponding to current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
//
// The caller must hold the lock (for reading or writing) before this function is called.
func (l *LockFile) GetLastWrite() (LastWrite, error) {
l.AssertLocked()
return LastWrite{}, nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should keep using the previously-recorded LastWrite value,
// possibly detecting its own modification as an external one:
//
// lw, err := state.lock.RecordWrite()
// if err != nil { /* fail */ }
// state.lastWrite = lw
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
return LastWrite{}, nil
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWrite value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
return LastWrite{}, false, nil
}
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
return false, nil return false, nil
} }
func (l *lockfile) Touch() error {
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
return nil return nil
} }
func (l *lockfile) IsReadWrite() bool { func (l *LockFile) IsReadWrite() bool {
return false return false
} }
func (l *lockfile) TouchedSince(when time.Time) bool { func (l *LockFile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file) stat, err := os.Stat(l.file)
if err != nil { if err != nil {
return true return true

View File

@ -0,0 +1,17 @@
package kernel
import "golang.org/x/sys/unix"
// Utsname represents the system name structure.
// It is a passthrough for unix.Utsname in order to make it portable to
// other platforms where it is not available.
type Utsname unix.Utsname
func uname() (*unix.Utsname, error) {
uts := &unix.Utsname{}
if err := unix.Uname(uts); err != nil {
return nil, err
}
return uts, nil
}
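Not part of the diff: a sketch of how a caller might turn the NUL-padded byte arrays of a Utsname into Go strings; the releaseString helper name is invented for illustration.
package kernel
import "bytes"
// releaseString is a hypothetical helper: it trims the NUL padding from the
// Release field of a Utsname returned by uname().
func releaseString(uts *Utsname) string {
	b := uts.Release[:]
	if i := bytes.IndexByte(b, 0); i >= 0 {
		b = b[:i]
	}
	return string(b)
}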

View File

@ -1,13 +1,14 @@
//go:build freebsd || openbsd //go:build openbsd
// +build freebsd openbsd // +build openbsd
package kernel package kernel
import ( import (
"errors" "fmt"
"runtime"
) )
// A stub called by kernel_unix.go. // A stub called by kernel_unix.go.
func uname() (*Utsname, error) { func uname() (*Utsname, error) {
return nil, errors.New("Kernel version detection is available only on linux") return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS)
} }

View File

@ -1,5 +1,5 @@
//go:build !linux && !solaris //go:build !linux && !solaris && !freebsd
// +build !linux,!solaris // +build !linux,!solaris,!freebsd
package kernel package kernel

View File

@ -0,0 +1,214 @@
package regexp
import (
"io"
"regexp"
"sync"
)
// Regexp is a wrapper struct used for wrapping MustCompile regex expressions
// used as global variables. Using this structure helps speed up the startup time
// of apps that want to use global regex variables. This library initializes them on
// first use as opposed to the start of the executable.
type Regexp struct {
once sync.Once
regexp *regexp.Regexp
val string
}
func Delayed(val string) Regexp {
re := Regexp{
val: val,
}
if precompile {
re.regexp = regexp.MustCompile(re.val)
}
return re
}
func (re *Regexp) compile() {
if precompile {
return
}
re.once.Do(func() {
re.regexp = regexp.MustCompile(re.val)
})
}
func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
re.compile()
return re.regexp.Expand(dst, template, src, match)
}
func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
re.compile()
return re.regexp.ExpandString(dst, template, src, match)
}
func (re *Regexp) Find(b []byte) []byte {
re.compile()
return re.regexp.Find(b)
}
func (re *Regexp) FindAll(b []byte, n int) [][]byte {
re.compile()
return re.regexp.FindAll(b, n)
}
func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
re.compile()
return re.regexp.FindAllIndex(b, n)
}
func (re *Regexp) FindAllString(s string, n int) []string {
re.compile()
return re.regexp.FindAllString(s, n)
}
func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
re.compile()
return re.regexp.FindAllStringIndex(s, n)
}
func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
re.compile()
return re.regexp.FindAllStringSubmatch(s, n)
}
func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
re.compile()
return re.regexp.FindAllStringSubmatchIndex(s, n)
}
func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
re.compile()
return re.regexp.FindAllSubmatch(b, n)
}
func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
re.compile()
return re.regexp.FindAllSubmatchIndex(b, n)
}
func (re *Regexp) FindIndex(b []byte) (loc []int) {
re.compile()
return re.regexp.FindIndex(b)
}
func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
re.compile()
return re.regexp.FindReaderIndex(r)
}
func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
re.compile()
return re.regexp.FindReaderSubmatchIndex(r)
}
func (re *Regexp) FindString(s string) string {
re.compile()
return re.regexp.FindString(s)
}
func (re *Regexp) FindStringIndex(s string) (loc []int) {
re.compile()
return re.regexp.FindStringIndex(s)
}
func (re *Regexp) FindStringSubmatch(s string) []string {
re.compile()
return re.regexp.FindStringSubmatch(s)
}
func (re *Regexp) FindStringSubmatchIndex(s string) []int {
re.compile()
return re.regexp.FindStringSubmatchIndex(s)
}
func (re *Regexp) FindSubmatch(b []byte) [][]byte {
re.compile()
return re.regexp.FindSubmatch(b)
}
func (re *Regexp) FindSubmatchIndex(b []byte) []int {
re.compile()
return re.regexp.FindSubmatchIndex(b)
}
func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
re.compile()
return re.regexp.LiteralPrefix()
}
func (re *Regexp) Longest() {
re.compile()
re.regexp.Longest()
}
func (re *Regexp) Match(b []byte) bool {
re.compile()
return re.regexp.Match(b)
}
func (re *Regexp) MatchReader(r io.RuneReader) bool {
re.compile()
return re.regexp.MatchReader(r)
}
func (re *Regexp) MatchString(s string) bool {
re.compile()
return re.regexp.MatchString(s)
}
func (re *Regexp) NumSubexp() int {
re.compile()
return re.regexp.NumSubexp()
}
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
re.compile()
return re.regexp.ReplaceAll(src, repl)
}
func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
re.compile()
return re.regexp.ReplaceAllFunc(src, repl)
}
func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
re.compile()
return re.regexp.ReplaceAllLiteral(src, repl)
}
func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
re.compile()
return re.regexp.ReplaceAllLiteralString(src, repl)
}
func (re *Regexp) ReplaceAllString(src, repl string) string {
re.compile()
return re.regexp.ReplaceAllString(src, repl)
}
func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
re.compile()
return re.regexp.ReplaceAllStringFunc(src, repl)
}
func (re *Regexp) Split(s string, n int) []string {
re.compile()
return re.regexp.Split(s, n)
}
func (re *Regexp) String() string {
re.compile()
return re.regexp.String()
}
func (re *Regexp) SubexpIndex(name string) int {
re.compile()
return re.regexp.SubexpIndex(name)
}
func (re *Regexp) SubexpNames() []string {
re.compile()
return re.regexp.SubexpNames()
}
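A usage sketch (not part of the vendored file): package-level patterns that previously used regexp.MustCompile can be declared with Delayed so compilation is deferred to first use.
package main
import (
	"fmt"
	"github.com/containers/storage/pkg/regexp"
)
// Compiled lazily on first use instead of at program start-up.
var validHex = regexp.Delayed(`^[a-f0-9]{64}$`)
func main() {
	fmt.Println(validHex.MatchString("abc")) // false: not 64 hex digits
}
One trade-off of this design: with the default build (precompile = false) an invalid pattern is only caught, by a panic, on first use; building with the regexp_precompile tag restores the eager MustCompile behaviour.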

View File

@ -0,0 +1,6 @@
//go:build !regexp_precompile
// +build !regexp_precompile
package regexp
const precompile = false

View File

@ -0,0 +1,6 @@
//go:build regexp_precompile
// +build regexp_precompile
package regexp
const precompile = true

View File

@ -9,18 +9,19 @@ import (
"math" "math"
"math/big" "math/big"
"math/rand" "math/rand"
"regexp"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/containers/storage/pkg/regexp"
) )
const shortLen = 12 const shortLen = 12
var ( var (
validShortID = regexp.MustCompile("^[a-f0-9]{12}$") validShortID = regexp.Delayed("^[a-f0-9]{12}$")
validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) validHex = regexp.Delayed(`^[a-f0-9]{64}$`)
rngLock sync.Mutex rngLock sync.Mutex
rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with randLock held. rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with randLock held.

View File

@ -18,7 +18,9 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
uid: s.Uid, uid: s.Uid,
gid: s.Gid, gid: s.Gid,
rdev: uint64(s.Rdev), rdev: uint64(s.Rdev),
mtim: s.Mtimespec} mtim: s.Mtimespec,
dev: s.Dev}
st.flags = s.Flags st.flags = s.Flags
st.dev = s.Dev
return st, nil return st, nil
} }

View File

@ -9,7 +9,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
uid: s.Uid, uid: s.Uid,
gid: s.Gid, gid: s.Gid,
rdev: uint64(s.Rdev), rdev: uint64(s.Rdev),
mtim: s.Mtim}, nil mtim: s.Mtim,
dev: uint64(s.Dev)}, nil
} }
// FromStatT converts a syscall.Stat_t type to a system.Stat_t type // FromStatT converts a syscall.Stat_t type to a system.Stat_t type

View File

@ -18,6 +18,7 @@ type StatT struct {
rdev uint64 rdev uint64
size int64 size int64
mtim syscall.Timespec mtim syscall.Timespec
dev uint64
platformStatT platformStatT
} }
@ -51,6 +52,11 @@ func (s StatT) Mtim() syscall.Timespec {
return s.mtim return s.mtim
} }
// Dev returns a unique identifier for the owning filesystem
func (s StatT) Dev() uint64 {
return s.dev
}
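Not part of the diff: one way the new Dev() accessor might be used, comparing device identifiers to decide whether two paths share a filesystem; sameFilesystem is a hypothetical helper built on the package's Stat function.
package main
import (
	"fmt"
	"github.com/containers/storage/pkg/system"
)
// sameFilesystem reports whether two paths appear to be on the same
// filesystem by comparing their device identifiers.
func sameFilesystem(a, b string) (bool, error) {
	sa, err := system.Stat(a)
	if err != nil {
		return false, err
	}
	sb, err := system.Stat(b)
	if err != nil {
		return false, err
	}
	return sa.Dev() == sb.Dev(), nil
}
func main() {
	same, err := sameFilesystem("/", "/tmp")
	if err != nil {
		panic(err)
	}
	fmt.Println(same)
}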
// Stat takes a path to a file and returns // Stat takes a path to a file and returns
// a system.StatT type pertaining to that file. // a system.StatT type pertaining to that file.
// //

View File

@ -43,6 +43,11 @@ func (s StatT) GID() uint32 {
return 0 return 0
} }
// Dev returns a unique identifier for the owning filesystem
func (s StatT) Dev() uint64 {
return 0
}
// Stat takes a path to a file and returns // Stat takes a path to a file and returns
// a system.StatT type pertaining to that file. // a system.StatT type pertaining to that file.
// //

View File

@ -32,6 +32,10 @@ graphroot = "/var/lib/containers/storage"
# #
# rootless_storage_path = "$HOME/.local/share/containers/storage" # rootless_storage_path = "$HOME/.local/share/containers/storage"
# Transient store mode causes all container metadata to be saved in temporary storage
# (i.e. runroot above). This is faster, but doesn't persist across reboots.
# transient_store = true
[storage.options] [storage.options]
# Storage options to be passed to underlying storage drivers # Storage options to be passed to underlying storage drivers

File diff suppressed because it is too large

View File

@ -57,4 +57,6 @@ var (
ErrNotSupported = errors.New("not supported") ErrNotSupported = errors.New("not supported")
// ErrInvalidMappings is returned when the specified mappings are invalid. // ErrInvalidMappings is returned when the specified mappings are invalid.
ErrInvalidMappings = errors.New("invalid mappings specified") ErrInvalidMappings = errors.New("invalid mappings specified")
// ErrNoAvailableIDs is returned when there are not enough unused IDs within the user namespace.
ErrNoAvailableIDs = errors.New("not enough unused IDs in user namespace")
) )

View File

@ -19,9 +19,11 @@ import (
type TomlConfig struct { type TomlConfig struct {
Storage struct { Storage struct {
Driver string `toml:"driver,omitempty"` Driver string `toml:"driver,omitempty"`
DriverPriority []string `toml:"driver_priority,omitempty"`
RunRoot string `toml:"runroot,omitempty"` RunRoot string `toml:"runroot,omitempty"`
GraphRoot string `toml:"graphroot,omitempty"` GraphRoot string `toml:"graphroot,omitempty"`
RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
TransientStore bool `toml:"transient_store,omitempty"`
Options cfg.OptionsConfig `toml:"options,omitempty"` Options cfg.OptionsConfig `toml:"options,omitempty"`
} `toml:"storage"` } `toml:"storage"`
} }
@ -212,10 +214,16 @@ type StoreOptions struct {
// RootlessStoragePath is the storage path for rootless users // RootlessStoragePath is the storage path for rootless users
// default $HOME/.local/share/containers/storage // default $HOME/.local/share/containers/storage
RootlessStoragePath string `toml:"rootless_storage_path"` RootlessStoragePath string `toml:"rootless_storage_path"`
// GraphDriverName is the underlying storage driver that we'll be // If the driver is not specified, the best suited driver will be picked
// using. It only needs to be specified the first time a Store is // either from GraphDriverPriority, if specified, or from the platform
// initialized for a given RunRoot and GraphRoot. // dependent priority list (in that order).
GraphDriverName string `json:"driver,omitempty"` GraphDriverName string `json:"driver,omitempty"`
// GraphDriverPriority is a list of storage drivers that will be tried
// when initializing the Store for a given RunRoot and GraphRoot, unless a
// GraphDriverName is set.
// This list can be used to define a custom order in which the drivers
// will be tried.
GraphDriverPriority []string `json:"driver-priority,omitempty"`
// GraphDriverOptions are driver-specific options. // GraphDriverOptions are driver-specific options.
GraphDriverOptions []string `json:"driver-options,omitempty"` GraphDriverOptions []string `json:"driver-options,omitempty"`
// UIDMap and GIDMap are used for setting up a container's root filesystem // UIDMap and GIDMap are used for setting up a container's root filesystem
@ -234,6 +242,8 @@ type StoreOptions struct {
PullOptions map[string]string `toml:"pull_options"` PullOptions map[string]string `toml:"pull_options"`
// DisableVolatile doesn't allow volatile mounts when it is set. // DisableVolatile doesn't allow volatile mounts when it is set.
DisableVolatile bool `json:"disable-volatile,omitempty"` DisableVolatile bool `json:"disable-volatile,omitempty"`
// If transient, don't persist containers over boot (stores db in runroot)
TransientStore bool `json:"transient_store,omitempty"`
} }
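Not part of the diff: a sketch of how a caller might set the two new fields programmatically (the equivalent storage.conf keys, per the toml tags above, are driver_priority and transient_store under [storage]); the paths and driver names below are illustrative only.
package main
import (
	"fmt"
	"github.com/containers/storage/types"
)
func main() {
	// Field names match the StoreOptions struct above; values are examples.
	opts := types.StoreOptions{
		RunRoot:             "/run/containers/storage",
		GraphRoot:           "/var/lib/containers/storage",
		GraphDriverPriority: []string{"overlay", "vfs"},
		TransientStore:      true,
	}
	fmt.Println(opts.GraphDriverPriority, opts.TransientStore)
}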
// isRootlessDriver returns true if the given storage driver is valid for containers running as non root // isRootlessDriver returns true if the given storage driver is valid for containers running as non root
@ -377,8 +387,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver")
storeOptions.GraphDriverName = overlayDriver storeOptions.GraphDriverName = overlayDriver
} }
if storeOptions.GraphDriverName == "" { storeOptions.GraphDriverPriority = config.Storage.DriverPriority
logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation", configFile) if storeOptions.GraphDriverName == "" && len(storeOptions.GraphDriverPriority) == 0 {
logrus.Warnf("The storage 'driver' option should be set in %s. A driver was picked automatically.", configFile)
} }
if config.Storage.RunRoot != "" { if config.Storage.RunRoot != "" {
storeOptions.RunRoot = config.Storage.RunRoot storeOptions.RunRoot = config.Storage.RunRoot
@ -452,6 +463,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
} }
storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile
storeOptions.TransientStore = config.Storage.TransientStore
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)

View File

@ -197,7 +197,7 @@ outer:
return 0, err return 0, err
} }
defer func() { defer func() {
if _, err2 := rlstore.Unmount(clayer.ID, true); err2 != nil { if _, err2 := rlstore.unmount(clayer.ID, true, false); err2 != nil {
if retErr == nil { if retErr == nil {
retErr = fmt.Errorf("unmounting temporary layer %#v: %w", clayer.ID, err2) retErr = fmt.Errorf("unmounting temporary layer %#v: %w", clayer.ID, err2)
} else { } else {
@ -264,7 +264,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rl
} }
} }
if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize { if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
return nil, nil, fmt.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize) return nil, nil, fmt.Errorf("the container needs a user namespace with size %v that is bigger than the maximum value allowed with userns=auto %v", size, s.autoNsMaxSize)
} }
} }

View File

@ -2,6 +2,7 @@ package storage
import ( import (
"fmt" "fmt"
"unicode"
"github.com/containers/storage/types" "github.com/containers/storage/types"
) )
@ -72,3 +73,15 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
} }
return dedupeNames(result), nil return dedupeNames(result), nil
} }
func nameLooksLikeID(name string) bool {
if len(name) != 64 {
return false
}
for _, c := range name {
if !unicode.Is(unicode.ASCII_Hex_Digit, c) {
return false
}
}
return true
}

View File

@ -3,7 +3,7 @@
before: before:
hooks: hooks:
- ./gen.sh - ./gen.sh
- go install mvdan.cc/garble@latest - go install mvdan.cc/garble@v0.7.2
builds: builds:
- -

View File

@ -9,7 +9,6 @@ This package provides various compression algorithms.
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
@ -17,6 +16,15 @@ This package provides various compression algorithms.
# changelog # changelog
* Dec 11, 2022 (v1.15.13)
* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
* Oct 26, 2022 (v1.15.12)
* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
* Sept 26, 2022 (v1.15.11) * Sept 26, 2022 (v1.15.11)
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678

View File

@ -86,11 +86,19 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
dict = dict[len(dict)-maxStatelessDict:] dict = dict[len(dict)-maxStatelessDict:]
} }
// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
var inDict []byte
for len(in) > 0 { for len(in) > 0 {
todo := in todo := in
if len(todo) > maxStatelessBlock-len(dict) { if len(inDict) > 0 {
if len(todo) > maxStatelessBlock-maxStatelessDict {
todo = todo[:maxStatelessBlock-maxStatelessDict]
}
} else if len(todo) > maxStatelessBlock-len(dict) {
todo = todo[:maxStatelessBlock-len(dict)] todo = todo[:maxStatelessBlock-len(dict)]
} }
inOrg := in
in = in[len(todo):] in = in[len(todo):]
uncompressed := todo uncompressed := todo
if len(dict) > 0 { if len(dict) > 0 {
@ -102,7 +110,11 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
todo = combined todo = combined
} }
// Compress // Compress
statelessEnc(&dst, todo, int16(len(dict))) if len(inDict) == 0 {
statelessEnc(&dst, todo, int16(len(dict)))
} else {
statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
}
isEof := eof && len(in) == 0 isEof := eof && len(in) == 0
if dst.n == 0 { if dst.n == 0 {
@ -119,7 +131,8 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
} }
if len(in) > 0 { if len(in) > 0 {
// Retain a dict if we have more // Retain a dict if we have more
dict = todo[len(todo)-maxStatelessDict:] inDict = inOrg[len(uncompressed)-maxStatelessDict:]
dict = nil
dst.Reset() dst.Reset()
} }
if bw.err != nil { if bw.err != nil {

View File

@ -365,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
m := uint32(0) m := uint32(0)
if len(s.prevTable) > 0 { if len(s.prevTable) > 0 {
for i, v := range s.count[:] { for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m { if v > m {
m = v m = v
} }
if v > 0 { s.symbolLen = uint16(i) + 1
s.symbolLen = uint16(i) + 1 if i >= len(s.prevTable) {
if i >= len(s.prevTable) { reuse = false
reuse = false } else if s.prevTable[i].nBits == 0 {
} else { reuse = false
if s.prevTable[i].nBits == 0 {
reuse = false
}
}
} }
} }
return int(m), reuse return int(m), reuse
} }
for i, v := range s.count[:] { for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m { if v > m {
m = v m = v
} }
if v > 0 { s.symbolLen = uint16(i) + 1
s.symbolLen = uint16(i) + 1
}
} }
return int(m), false return int(m), false
} }

View File

@ -82,8 +82,9 @@ type blockDec struct {
err error err error
// Check against this crc // Check against this crc, if hasCRC is true.
checkCRC []byte checkCRC uint32
hasCRC bool
// Frame to use for singlethreaded decoding. // Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame. // Should not be used by the decoder itself since parent may be another frame.

View File

@ -4,7 +4,6 @@
package zstd package zstd
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"errors" "errors"
"io" "io"
@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
} }
h.HeaderSize += 4 h.HeaderSize += 4
b, in := in[:4], in[4:] b, in := in[:4], in[4:]
if !bytes.Equal(b, frameMagic) { if string(b) != frameMagic {
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch return ErrMagicMismatch
} }
if len(in) < 4 { if len(in) < 4 {
@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error {
} }
b, in = in[:size], in[size:] b, in = in[:size], in[size:]
h.HeaderSize += int(size) h.HeaderSize += int(size)
switch size { switch len(b) {
case 1: case 1:
h.DictionaryID = uint32(b[0]) h.DictionaryID = uint32(b[0])
case 2: case 2:
@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error {
} }
b, in = in[:fcsSize], in[fcsSize:] b, in = in[:fcsSize], in[fcsSize:]
h.HeaderSize += int(fcsSize) h.HeaderSize += int(fcsSize)
switch fcsSize { switch len(b) {
case 1: case 1:
h.FrameContentSize = uint64(b[0]) h.FrameContentSize = uint64(b[0])
case 2: case 2:

View File

@ -5,7 +5,6 @@
package zstd package zstd
import ( import (
"bytes"
"context" "context"
"encoding/binary" "encoding/binary"
"io" "io"
@ -459,7 +458,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
} }
if !d.o.ignoreChecksum && len(next.b) > 0 { if d.o.ignoreChecksum {
return true
}
if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b) n, err := d.current.crc.Write(next.b)
if err == nil { if err == nil {
if n != len(next.b) { if n != len(next.b) {
@ -467,18 +470,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
} }
} }
} }
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { if next.err == nil && next.d != nil && next.d.hasCRC {
got := d.current.crc.Sum64() got := uint32(d.current.crc.Sum64())
var tmp [4]byte if got != next.d.checkCRC {
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
} }
d.current.err = ErrCRCMismatch d.current.err = ErrCRCMismatch
} else { } else {
if debugDecoder { if debugDecoder {
println("CRC ok", tmp[:]) printf("CRC ok %08x\n", got)
} }
} }
} }
@ -918,18 +919,22 @@ decodeStream:
println("next block returned error:", err) println("next block returned error:", err)
} }
dec.err = err dec.err = err
dec.checkCRC = nil dec.hasCRC = false
if dec.Last && frame.HasCheckSum && err == nil { if dec.Last && frame.HasCheckSum && err == nil {
crc, err := frame.rawInput.readSmall(4) crc, err := frame.rawInput.readSmall(4)
if err != nil { if len(crc) < 4 {
if err == nil {
err = io.ErrUnexpectedEOF
}
println("CRC missing?", err) println("CRC missing?", err)
dec.err = err dec.err = err
} } else {
var tmp [4]byte dec.checkCRC = binary.LittleEndian.Uint32(crc)
copy(tmp[:], crc) dec.hasCRC = true
dec.checkCRC = tmp[:] if debugDecoder {
if debugDecoder { printf("found crc to check: %08x\n", dec.checkCRC)
println("found crc to check:", dec.checkCRC) }
} }
} }
err = dec.err err = dec.err

View File

@ -1,7 +1,6 @@
package zstd package zstd
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -20,7 +19,7 @@ type dict struct {
content []byte content []byte
} }
var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} const dictMagic = "\x37\xa4\x30\xec"
// ID returns the dictionary id or 0 if d is nil. // ID returns the dictionary id or 0 if d is nil.
func (d *dict) ID() uint32 { func (d *dict) ID() uint32 {
@ -50,7 +49,7 @@ func loadDict(b []byte) (*dict, error) {
ofDec: sequenceDec{fse: &fseDecoder{}}, ofDec: sequenceDec{fse: &fseDecoder{}},
mlDec: sequenceDec{fse: &fseDecoder{}}, mlDec: sequenceDec{fse: &fseDecoder{}},
} }
if !bytes.Equal(b[:4], dictMagic[:]) { if string(b[:4]) != dictMagic {
return nil, ErrMagicMismatch return nil, ErrMagicMismatch
} }
d.id = binary.LittleEndian.Uint32(b[4:8]) d.id = binary.LittleEndian.Uint32(b[4:8])

View File

@ -16,6 +16,7 @@ type fastBase struct {
cur int32 cur int32
// maximum offset. Should be at least 2x block size. // maximum offset. Should be at least 2x block size.
maxMatchOff int32 maxMatchOff int32
bufferReset int32
hist []byte hist []byte
crc *xxhash.Digest crc *xxhash.Digest
tmp [8]byte tmp [8]byte
@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
} }
func (e *fastBase) addBlock(src []byte) int32 { func (e *fastBase) addBlock(src []byte) int32 {
if debugAsserts && e.cur > bufferReset { if debugAsserts && e.cur > e.bufferReset {
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
} }
// check if we have space already // check if we have space already
if len(e.hist)+len(src) > cap(e.hist) { if len(e.hist)+len(src) > cap(e.hist) {
@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
} }
} }
a := src[s:] return int32(matchLen(src[s:], src[t:]))
b := src[t:]
b = b[:len(a)]
end := int32((len(a) >> 3) << 3)
for i := int32(0); i < end; i += 8 {
if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
return i + int32(bits.TrailingZeros64(diff)>>3)
}
}
a = a[end:]
b = b[end:]
for i := range a {
if a[i] != b[i] {
return int32(i) + end
}
}
return int32(len(a)) + end
} }
// Reset the encoding table. // Reset the encoding table.
@ -171,7 +155,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
// We offset current position so everything will be out of reach. // We offset current position so everything will be out of reach.
// If above reset line, history will be purged. // If above reset line, history will be purged.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += e.maxMatchOff + int32(len(e.hist)) e.cur += e.maxMatchOff + int32(len(e.hist))
} }
e.hist = e.hist[:0] e.hist = e.hist[:0]

View File

@ -85,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [bestShortTableSize]prevEntry{}
e.table[i] = prevEntry{} e.longTable = [bestLongTableSize]prevEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -193,8 +189,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
bestOf := func(a, b match) match { bestOf := func(a, b *match) *match {
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
return a return a
} }
return b return b
@ -220,22 +216,26 @@ encodeLoop:
return m return m
} }
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
if canRepeat && best.length < goodEnough { if canRepeat && best.length < goodEnough {
cv32 := uint32(cv >> 8) cv32 := uint32(cv >> 8)
spp := s + 1 spp := s + 1
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) m1 := matchAt(spp-offset1, spp, cv32, 1)
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) m2 := matchAt(spp-offset2, spp, cv32, 2)
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
if best.length > 0 { if best.length > 0 {
cv32 = uint32(cv >> 24) cv32 = uint32(cv >> 24)
spp += 2 spp += 2
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) m1 := matchAt(spp-offset1, spp, cv32, 1)
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) m2 := matchAt(spp-offset2, spp, cv32, 2)
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
} }
} }
// Load next and check... // Load next and check...
@ -262,26 +262,33 @@ encodeLoop:
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1 // Short at s+1
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
// Long at s+1, s+2 // Long at s+1, s+2
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
if false { if false {
// Short at s+3. // Short at s+3.
// Too often worse... // Too often worse...
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
best = bestOf(best, &m)
} }
// See if we can find a better match by checking where the current best ends. // See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match. // Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit { if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL] candidateEnd := e.longTable[nextHashL]
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { // Start check at a fixed offset to allow for a few mismatches.
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) // For this compression level 2 yields the best results.
if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { const skipBeginning = 2
bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd := bestOf(best, &m)
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd = bestOf(bestEnd, &m)
} }
best = bestEnd best = bestEnd
} }

View File

@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [betterShortTableSize]tableEntry{}
e.table[i] = tableEntry{} e.longTable = [betterLongTableSize]prevEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}

View File

@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [dFastShortTableSize]tableEntry{}
e.table[i] = tableEntry{} e.longTable = [dFastLongTableSize]tableEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = tableEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
if e.cur >= bufferReset { if e.cur >= e.bufferReset {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
} }
@ -685,7 +681,7 @@ encodeLoop:
} }
// We do not store history, so we must offset e.cur to avoid false matches for next user. // We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += int32(len(src)) e.cur += int32(len(src))
} }
} }
@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}

View File

@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
@ -310,7 +310,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
} }
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
if e.cur >= bufferReset { if e.cur >= e.bufferReset {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
} }
@ -538,7 +538,7 @@ encodeLoop:
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
// We do not store history, so we must offset e.cur to avoid false matches for next user. // We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += int32(len(src)) e.cur += int32(len(src))
} }
} }
@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
return return
} }
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [tableSize]tableEntry{}
e.table[i] = tableEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }

View File

@ -8,6 +8,7 @@ import (
"crypto/rand" "crypto/rand"
"fmt" "fmt"
"io" "io"
"math"
rdebug "runtime/debug" rdebug "runtime/debug"
"sync" "sync"
@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
} }
return dst return dst
} }
// MaxEncodedSize returns the expected maximum
// size of an encoded block or stream.
func (e *Encoder) MaxEncodedSize(size int) int {
frameHeader := 4 + 2 // magic + frame header & window descriptor
if e.o.dict != nil {
frameHeader += 4
}
// Frame content size:
if size < 256 {
frameHeader++
} else if size < 65536+256 {
frameHeader += 2
} else if size < math.MaxInt32 {
frameHeader += 4
} else {
frameHeader += 8
}
// Final crc
if e.o.crc {
frameHeader += 4
}
// Max overhead is 3 bytes/block.
// There cannot be 0 blocks.
blocks := (size + e.o.blockSize) / e.o.blockSize
// Combine, add padding.
maxSz := frameHeader + 3*blocks + size
if e.o.pad > 1 {
maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
}
return maxSz
}
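Not part of the diff: a sketch of the intended use of MaxEncodedSize, pre-sizing the destination passed to EncodeAll so it never needs to grow.
package main
import (
	"fmt"
	"github.com/klauspost/compress/zstd"
)
func main() {
	// An Encoder created with a nil writer is the form meant for EncodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	src := []byte("example payload")
	// Capacity bound from MaxEncodedSize: EncodeAll appends into dst
	// without reallocating it.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	dst = enc.EncodeAll(src, dst)
	fmt.Println(len(src), "->", len(dst))
}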

View File

@ -3,6 +3,7 @@ package zstd
import ( import (
"errors" "errors"
"fmt" "fmt"
"math"
"runtime" "runtime"
"strings" "strings"
) )
@ -47,22 +48,22 @@ func (o encoderOptions) encoder() encoder {
switch o.level { switch o.level {
case SpeedFastest: case SpeedFastest:
if o.dict != nil { if o.dict != nil {
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
} }
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedDefault: case SpeedDefault:
if o.dict != nil { if o.dict != nil {
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
} }
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
case SpeedBetterCompression: case SpeedBetterCompression:
if o.dict != nil { if o.dict != nil {
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
} }
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedBestCompression: case SpeedBestCompression:
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
} }
panic("unknown compression level") panic("unknown compression level")
} }

View File

@ -5,7 +5,7 @@
package zstd package zstd
import ( import (
"bytes" "encoding/binary"
"encoding/hex" "encoding/hex"
"errors" "errors"
"io" "io"
@ -43,9 +43,9 @@ const (
MaxWindowSize = 1 << 29 MaxWindowSize = 1 << 29
) )
var ( const (
frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} frameMagic = "\x28\xb5\x2f\xfd"
skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} skippableFrameMagic = "\x2a\x4d\x18"
) )
func newFrameDec(o decoderOptions) *frameDec { func newFrameDec(o decoderOptions) *frameDec {
@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
copy(signature[1:], b) copy(signature[1:], b)
} }
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
if debugDecoder { if debugDecoder {
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
} }
// Break if not skippable frame. // Break if not skippable frame.
break break
@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
return err return err
} }
} }
if !bytes.Equal(signature[:], frameMagic) { if string(signature[:]) != frameMagic {
if debugDecoder { if debugDecoder {
println("Got magic numbers: ", signature, "want:", frameMagic) println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
} }
return ErrMagicMismatch return ErrMagicMismatch
} }
@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error {
return err return err
} }
var id uint32 var id uint32
switch size { switch len(b) {
case 1: case 1:
id = uint32(b[0]) id = uint32(b[0])
case 2: case 2:
@ -204,7 +204,7 @@ func (d *frameDec) reset(br byteBuffer) error {
println("Reading Frame content", err) println("Reading Frame content", err)
return err return err
} }
switch fcsSize { switch len(b) {
case 1: case 1:
d.FrameContentSize = uint64(b[0]) d.FrameContentSize = uint64(b[0])
case 2: case 2:
@ -305,7 +305,7 @@ func (d *frameDec) checkCRC() error {
} }
// We can overwrite upper tmp now // We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4) buf, err := d.rawInput.readSmall(4)
if err != nil { if err != nil {
println("CRC missing?", err) println("CRC missing?", err)
return err return err
@ -315,22 +315,17 @@ func (d *frameDec) checkCRC() error {
return nil return nil
} }
var tmp [4]byte want := binary.LittleEndian.Uint32(buf[:4])
got := d.crc.Sum64() got := uint32(d.crc.Sum64())
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) { if got != want {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want) printf("CRC check failed: got %08x, want %08x\n", got, want)
} }
return ErrCRCMismatch return ErrCRCMismatch
} }
if debugDecoder { if debugDecoder {
println("CRC ok", tmp[:]) printf("CRC ok %08x\n", got)
} }
return nil return nil
} }
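For reference, the frame checksum verified here is the low 32 bits of the XXH64 digest of the decoded content, stored little-endian. A standalone sketch of the same check, using the upstream github.com/cespare/xxhash/v2 package instead of the vendored internal copy (an assumption made purely for illustration; verifyFrameCRC is a hypothetical helper, not part of this diff):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// verifyFrameCRC mirrors the decoder's check: the stored value is the
// low 32 bits of XXH64(decoded), encoded little-endian.
func verifyFrameCRC(decoded, storedCRC []byte) error {
	want := binary.LittleEndian.Uint32(storedCRC[:4])
	got := uint32(xxhash.Sum64(decoded))
	if got != want {
		return fmt.Errorf("crc mismatch: got %08x, want %08x", got, want)
	}
	return nil
}

func main() {
	data := []byte("decoded frame content")
	var stored [4]byte
	binary.LittleEndian.PutUint32(stored[:], uint32(xxhash.Sum64(data)))
	fmt.Println(verifyFrameCRC(data, stored[:])) // <nil>
}
```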
View File
@ -2,12 +2,7 @@
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go high-quality hashing algorithm that is much faster than anything in the Go
standard library. standard library.
@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64 func (*Digest) Sum64() uint64
``` ```
This implementation provides a fast pure-Go implementation and an even faster The package is written with optimized pure Go and also contains even faster
assembly implementation for amd64. assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.
[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks ## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64. implementations of Sum64.
| input size | purego | asm | | input size | purego | asm |
| --- | --- | --- | | ---------- | --------- | --------- |
| 5 B | 979.66 MB/s | 1291.17 MB/s | | 4 B | 1.3 GB/s | 1.2 GB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s | | 16 B | 2.9 GB/s | 3.5 GB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s | | 100 B | 6.9 GB/s | 8.1 GB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s | | 4 KB | 11.7 GB/s | 16.7 GB/s |
| 10 MB | 12.0 GB/s | 17.3 GB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
the following commands under Go 1.11.2: CPU using the following commands under Go 1.19.2:
``` ```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
$ go test -benchtime 10s -bench '/xxhash,direct,bytes' benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
``` ```
## Projects using this package ## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb) - [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus) - [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache) - [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
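Since the API block quoted above only lists signatures, here is a minimal usage sketch of both the one-shot Sum64 and the streaming Digest, assuming the upstream import path github.com/cespare/xxhash/v2:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming hashing via the Digest (implements hash.Hash64).
	d := xxhash.New()
	d.WriteString("hel") // WriteString never returns an error
	d.WriteString("lo")
	fmt.Printf("%016x\n", d.Sum64()) // same value as the one-shot call
}
```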
View File
@ -18,19 +18,11 @@ const (
prime5 uint64 = 2870177450012600261 prime5 uint64 = 2870177450012600261
) )
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where // Store the primes in an array as well.
// possible in the Go code is worth a small (but measurable) performance boost //
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for // The consts are used when possible in Go code to avoid MOVs but we need a
// convenience in the Go code in a few places where we need to intentionally // contiguous array of the assembly code.
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64. // Digest implements hash.Hash64.
type Digest struct { type Digest struct {
@ -52,10 +44,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused. // Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() { func (d *Digest) Reset() {
d.v1 = prime1v + prime2 d.v1 = primes[0] + prime2
d.v2 = prime2 d.v2 = prime2
d.v3 = 0 d.v3 = 0
d.v4 = -prime1v d.v4 = -primes[0]
d.total = 0 d.total = 0
d.n = 0 d.n = 0
} }
@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b) n = len(b)
d.total += uint64(n) d.total += uint64(n)
memleft := d.mem[d.n&(len(d.mem)-1):]
if d.n+n < 32 { if d.n+n < 32 {
// This new data doesn't even fill the current block. // This new data doesn't even fill the current block.
copy(d.mem[d.n:], b) copy(memleft, b)
d.n += n d.n += n
return return
} }
if d.n > 0 { if d.n > 0 {
// Finish off the partial block. // Finish off the partial block.
copy(d.mem[d.n:], b) c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8])) d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16])) d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24])) d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32])) d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:] b = b[c:]
d.n = 0 d.n = 0
} }
@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total h += d.total
i, end := 0, d.n b := d.mem[:d.n&(len(d.mem)-1)]
for ; i+8 <= end; i += 8 { for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(d.mem[i:i+8])) k1 := round(0, u64(b[:8]))
h ^= k1 h ^= k1
h = rol27(h)*prime1 + prime4 h = rol27(h)*prime1 + prime4
} }
if i+4 <= end { if len(b) >= 4 {
h ^= uint64(u32(d.mem[i:i+4])) * prime1 h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3 h = rol23(h)*prime2 + prime3
i += 4 b = b[4:]
} }
for i < end { for ; len(b) > 0; b = b[1:] {
h ^= uint64(d.mem[i]) * prime5 h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1 h = rol11(h) * prime1
i++
} }
h ^= h >> 33 h ^= h >> 33
View File
@ -1,3 +1,4 @@
//go:build !appengine && gc && !purego && !noasm
// +build !appengine // +build !appengine
// +build gc // +build gc
// +build !purego // +build !purego
@ -5,212 +6,205 @@
#include "textflag.h" #include "textflag.h"
// Register allocation: // Registers:
// AX h #define h AX
// SI pointer to advance through b #define d AX
// DX n #define p SI // pointer to advance through b
// BX loop end #define n DX
// R8 v1, k1 #define end BX // loop end
// R9 v2 #define v1 R8
// R10 v3 #define v2 R9
// R11 v4 #define v3 R10
// R12 tmp #define v4 R11
// R13 prime1v #define x R12
// R14 prime2v #define prime1 R13
// DI prime4v #define prime2 R14
#define prime4 DI
// round reads from and advances the buffer pointer in SI. #define round(acc, x) \
// It assumes that R13 has prime1v and R14 has prime2v. IMULQ prime2, x \
#define round(r) \ ADDQ x, acc \
MOVQ (SI), R12 \ ROLQ $31, acc \
ADDQ $8, SI \ IMULQ prime1, acc
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val. // round0 performs the operation x = round(0, x).
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define round0(x) \
#define mergeRound(acc, val) \ IMULQ prime2, x \
IMULQ R14, val \ ROLQ $31, x \
ROLQ $31, val \ IMULQ prime1, x
IMULQ R13, val \
XORQ val, acc \ // mergeRound applies a merge round on the two registers acc and x.
IMULQ R13, acc \ // It assumes that prime1, prime2, and prime4 have been loaded.
ADDQ DI, acc #define mergeRound(acc, x) \
round0(x) \
XORQ x, acc \
IMULQ prime1, acc \
ADDQ prime4, acc
// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process.
#define blockLoop() \
loop: \
MOVQ +0(p), x \
round(v1, x) \
MOVQ +8(p), x \
round(v2, x) \
MOVQ +16(p), x \
round(v3, x) \
MOVQ +24(p), x \
round(v4, x) \
ADDQ $32, p \
CMPQ p, end \
JLE loop
// func Sum64(b []byte) uint64 // func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes. // Load fixed primes.
MOVQ ·prime1v(SB), R13 MOVQ ·primes+0(SB), prime1
MOVQ ·prime2v(SB), R14 MOVQ ·primes+8(SB), prime2
MOVQ ·prime4v(SB), DI MOVQ ·primes+24(SB), prime4
// Load slice. // Load slice.
MOVQ b_base+0(FP), SI MOVQ b_base+0(FP), p
MOVQ b_len+8(FP), DX MOVQ b_len+8(FP), n
LEAQ (SI)(DX*1), BX LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32. // The first loop limit will be len(b)-32.
SUBQ $32, BX SUBQ $32, end
// Check whether we have at least one block. // Check whether we have at least one block.
CMPQ DX, $32 CMPQ n, $32
JLT noBlocks JLT noBlocks
// Set up initial state (v1, v2, v3, v4). // Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8 MOVQ prime1, v1
ADDQ R14, R8 ADDQ prime2, v1
MOVQ R14, R9 MOVQ prime2, v2
XORQ R10, R10 XORQ v3, v3
XORQ R11, R11 XORQ v4, v4
SUBQ R13, R11 SUBQ prime1, v4
// Loop until SI > BX. blockLoop()
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX MOVQ v1, h
JLE blockLoop ROLQ $1, h
MOVQ v2, x
ROLQ $7, x
ADDQ x, h
MOVQ v3, x
ROLQ $12, x
ADDQ x, h
MOVQ v4, x
ROLQ $18, x
ADDQ x, h
MOVQ R8, AX mergeRound(h, v1)
ROLQ $1, AX mergeRound(h, v2)
MOVQ R9, R12 mergeRound(h, v3)
ROLQ $7, R12 mergeRound(h, v4)
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks JMP afterBlocks
noBlocks: noBlocks:
MOVQ ·prime5v(SB), AX MOVQ ·primes+32(SB), h
afterBlocks: afterBlocks:
ADDQ DX, AX ADDQ n, h
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, end
ADDQ $24, BX CMPQ p, end
JG try4
CMPQ SI, BX loop8:
JG fourByte MOVQ (p), x
ADDQ $8, p
round0(x)
XORQ x, h
ROLQ $27, h
IMULQ prime1, h
ADDQ prime4, h
wordLoop: CMPQ p, end
// Calculate k1. JLE loop8
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX try4:
ROLQ $27, AX ADDQ $4, end
IMULQ R13, AX CMPQ p, end
ADDQ DI, AX JG try1
CMPQ SI, BX MOVL (p), x
JLE wordLoop ADDQ $4, p
IMULQ prime1, x
XORQ x, h
fourByte: ROLQ $23, h
ADDQ $4, BX IMULQ prime2, h
CMPQ SI, BX ADDQ ·primes+16(SB), h
JG singles
MOVL (SI), R8 try1:
ADDQ $4, SI ADDQ $4, end
IMULQ R13, R8 CMPQ p, end
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
JGE finalize JGE finalize
singlesLoop: loop1:
MOVBQZX (SI), R12 MOVBQZX (p), x
ADDQ $1, SI ADDQ $1, p
IMULQ ·prime5v(SB), R12 IMULQ ·primes+32(SB), x
XORQ R12, AX XORQ x, h
ROLQ $11, h
IMULQ prime1, h
ROLQ $11, AX CMPQ p, end
IMULQ R13, AX JL loop1
CMPQ SI, BX
JL singlesLoop
finalize: finalize:
MOVQ AX, R12 MOVQ h, x
SHRQ $33, R12 SHRQ $33, x
XORQ R12, AX XORQ x, h
IMULQ R14, AX IMULQ prime2, h
MOVQ AX, R12 MOVQ h, x
SHRQ $29, R12 SHRQ $29, x
XORQ R12, AX XORQ x, h
IMULQ ·prime3v(SB), AX IMULQ ·primes+16(SB), h
MOVQ AX, R12 MOVQ h, x
SHRQ $32, R12 SHRQ $32, x
XORQ R12, AX XORQ x, h
MOVQ AX, ret+24(FP) MOVQ h, ret+24(FP)
RET RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int // func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40 TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round. // Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13 MOVQ ·primes+0(SB), prime1
MOVQ ·prime2v(SB), R14 MOVQ ·primes+8(SB), prime2
// Load slice. // Load slice.
MOVQ b_base+8(FP), SI MOVQ b_base+8(FP), p
MOVQ b_len+16(FP), DX MOVQ b_len+16(FP), n
LEAQ (SI)(DX*1), BX LEAQ (p)(n*1), end
SUBQ $32, BX SUBQ $32, end
// Load vN from d. // Load vN from d.
MOVQ d+0(FP), AX MOVQ s+0(FP), d
MOVQ 0(AX), R8 // v1 MOVQ 0(d), v1
MOVQ 8(AX), R9 // v2 MOVQ 8(d), v2
MOVQ 16(AX), R10 // v3 MOVQ 16(d), v3
MOVQ 24(AX), R11 // v4 MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is // We don't need to check the loop condition here; this function is
// always called with at least one block of data to process. // always called with at least one block of data to process.
blockLoop: blockLoop()
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d. // Copy vN back to d.
MOVQ R8, 0(AX) MOVQ v1, 0(d)
MOVQ R9, 8(AX) MOVQ v2, 8(d)
MOVQ R10, 16(AX) MOVQ v3, 16(d)
MOVQ R11, 24(AX) MOVQ v4, 24(d)
// The number of bytes written is SI minus the old base pointer. // The number of bytes written is p minus the old base pointer.
SUBQ b_base+8(FP), SI SUBQ b_base+8(FP), p
MOVQ SI, ret+32(FP) MOVQ p, ret+32(FP)
RET RET
View File
@ -1,13 +1,17 @@
// +build gc,!purego,!noasm //go:build !appengine && gc && !purego && !noasm
// +build !appengine
// +build gc
// +build !purego
// +build !noasm
#include "textflag.h" #include "textflag.h"
// Register allocation. // Registers:
#define digest R1 #define digest R1
#define h R2 // Return value. #define h R2 // return value
#define p R3 // Input pointer. #define p R3 // input pointer
#define len R4 #define n R4 // input length
#define nblocks R5 // len / 32. #define nblocks R5 // n / 32
#define prime1 R7 #define prime1 R7
#define prime2 R8 #define prime2 R8
#define prime3 R9 #define prime3 R9
@ -25,60 +29,52 @@
#define round(acc, x) \ #define round(acc, x) \
MADD prime2, acc, x, acc \ MADD prime2, acc, x, acc \
ROR $64-31, acc \ ROR $64-31, acc \
MUL prime1, acc \ MUL prime1, acc
// x = round(0, x). // round0 performs the operation x = round(0, x).
#define round0(x) \ #define round0(x) \
MUL prime2, x \ MUL prime2, x \
ROR $64-31, x \ ROR $64-31, x \
MUL prime1, x \ MUL prime1, x
#define mergeRound(x) \ #define mergeRound(acc, x) \
round0(x) \ round0(x) \
EOR x, h \ EOR x, acc \
MADD h, prime4, prime1, h \ MADD acc, prime4, prime1, acc
// Update v[1-4] with 32-byte blocks. Assumes len >= 32. // blockLoop processes as many 32-byte blocks as possible,
#define blocksLoop() \ // updating v1, v2, v3, and v4. It assumes that n >= 32.
LSR $5, len, nblocks \ #define blockLoop() \
PCALIGN $16 \ LSR $5, n, nblocks \
loop: \ PCALIGN $16 \
LDP.P 32(p), (x1, x2) \ loop: \
round(v1, x1) \ LDP.P 16(p), (x1, x2) \
LDP -16(p), (x3, x4) \ LDP.P 16(p), (x3, x4) \
round(v2, x2) \ round(v1, x1) \
SUB $1, nblocks \ round(v2, x2) \
round(v3, x3) \ round(v3, x3) \
round(v4, x4) \ round(v4, x4) \
CBNZ nblocks, loop \ SUB $1, nblocks \
CBNZ nblocks, loop
// The primes are repeated here to ensure that they're stored
// in a contiguous array, so we can load them with LDP.
DATA primes<> +0(SB)/8, $11400714785074694791
DATA primes<> +8(SB)/8, $14029467366897019727
DATA primes<>+16(SB)/8, $1609587929392839161
DATA primes<>+24(SB)/8, $9650029242287828579
DATA primes<>+32(SB)/8, $2870177450012600261
GLOBL primes<>(SB), NOPTR+RODATA, $40
// func Sum64(b []byte) uint64 // func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
LDP b_base+0(FP), (p, len) LDP b_base+0(FP), (p, n)
LDP primes<> +0(SB), (prime1, prime2) LDP ·primes+0(SB), (prime1, prime2)
LDP primes<>+16(SB), (prime3, prime4) LDP ·primes+16(SB), (prime3, prime4)
MOVD primes<>+32(SB), prime5 MOVD ·primes+32(SB), prime5
CMP $32, len CMP $32, n
CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
BLO afterLoop BLT afterLoop
ADD prime1, prime2, v1 ADD prime1, prime2, v1
MOVD prime2, v2 MOVD prime2, v2
MOVD $0, v3 MOVD $0, v3
NEG prime1, v4 NEG prime1, v4
blocksLoop() blockLoop()
ROR $64-1, v1, x1 ROR $64-1, v1, x1
ROR $64-7, v2, x2 ROR $64-7, v2, x2
@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
ADD x3, x4 ADD x3, x4
ADD x2, x4, h ADD x2, x4, h
mergeRound(v1) mergeRound(h, v1)
mergeRound(v2) mergeRound(h, v2)
mergeRound(v3) mergeRound(h, v3)
mergeRound(v4) mergeRound(h, v4)
afterLoop: afterLoop:
ADD len, h ADD n, h
TBZ $4, len, try8 TBZ $4, n, try8
LDP.P 16(p), (x1, x2) LDP.P 16(p), (x1, x2)
round0(x1) round0(x1)
// NOTE: here and below, sequencing the EOR after the ROR (using a
// rotated register) is worth a small but measurable speedup for small
// inputs.
ROR $64-27, h ROR $64-27, h
EOR x1 @> 64-27, h, h EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
round0(x2) round0(x2)
ROR $64-27, h ROR $64-27, h
EOR x2 @> 64-27, h EOR x2 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
try8: try8:
TBZ $3, len, try4 TBZ $3, n, try4
MOVD.P 8(p), x1 MOVD.P 8(p), x1
round0(x1) round0(x1)
ROR $64-27, h ROR $64-27, h
EOR x1 @> 64-27, h EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
try4: try4:
TBZ $2, len, try2 TBZ $2, n, try2
MOVWU.P 4(p), x2 MOVWU.P 4(p), x2
MUL prime1, x2 MUL prime1, x2
ROR $64-23, h ROR $64-23, h
EOR x2 @> 64-23, h EOR x2 @> 64-23, h, h
MADD h, prime3, prime2, h MADD h, prime3, prime2, h
try2: try2:
TBZ $1, len, try1 TBZ $1, n, try1
MOVHU.P 2(p), x3 MOVHU.P 2(p), x3
AND $255, x3, x1 AND $255, x3, x1
LSR $8, x3, x2 LSR $8, x3, x2
MUL prime5, x1 MUL prime5, x1
ROR $64-11, h ROR $64-11, h
EOR x1 @> 64-11, h EOR x1 @> 64-11, h, h
MUL prime1, h MUL prime1, h
MUL prime5, x2 MUL prime5, x2
ROR $64-11, h ROR $64-11, h
EOR x2 @> 64-11, h EOR x2 @> 64-11, h, h
MUL prime1, h MUL prime1, h
try1: try1:
TBZ $0, len, end TBZ $0, n, finalize
MOVBU (p), x4 MOVBU (p), x4
MUL prime5, x4 MUL prime5, x4
ROR $64-11, h ROR $64-11, h
EOR x4 @> 64-11, h EOR x4 @> 64-11, h, h
MUL prime1, h MUL prime1, h
end: finalize:
EOR h >> 33, h EOR h >> 33, h
MUL prime2, h MUL prime2, h
EOR h >> 29, h EOR h >> 29, h
@ -163,24 +163,22 @@ end:
RET RET
// func writeBlocks(d *Digest, b []byte) int // func writeBlocks(d *Digest, b []byte) int
// TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Assumes len(b) >= 32. LDP ·primes+0(SB), (prime1, prime2)
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
LDP primes<>(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously. // Load state. Assume v[1-4] are stored contiguously.
MOVD d+0(FP), digest MOVD d+0(FP), digest
LDP 0(digest), (v1, v2) LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4) LDP 16(digest), (v3, v4)
LDP b_base+8(FP), (p, len) LDP b_base+8(FP), (p, n)
blocksLoop() blockLoop()
// Store updated state. // Store updated state.
STP (v1, v2), 0(digest) STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest) STP (v3, v4), 16(digest)
BIC $31, len BIC $31, n
MOVD len, ret+32(FP) MOVD n, ret+32(FP)
RET RET
View File
@ -13,4 +13,4 @@ package xxhash
func Sum64(b []byte) uint64 func Sum64(b []byte) uint64
//go:noescape //go:noescape
func writeBlocks(d *Digest, b []byte) int func writeBlocks(s *Digest, b []byte) int
View File
@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64 var h uint64
if n >= 32 { if n >= 32 {
v1 := prime1v + prime2 v1 := primes[0] + prime2
v2 := prime2 v2 := prime2
v3 := uint64(0) v3 := uint64(0)
v4 := -prime1v v4 := -primes[0]
for len(b) >= 32 { for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)])) v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)])) v2 = round(v2, u64(b[8:16:len(b)]))
@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n) h += uint64(n)
i, end := 0, len(b) for ; len(b) >= 8; b = b[8:] {
for ; i+8 <= end; i += 8 { k1 := round(0, u64(b[:8]))
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1 h ^= k1
h = rol27(h)*prime1 + prime4 h = rol27(h)*prime1 + prime4
} }
if i+4 <= end { if len(b) >= 4 {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3 h = rol23(h)*prime2 + prime3
i += 4 b = b[4:]
} }
for ; i < end; i++ { for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[i]) * prime5 h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1 h = rol11(h) * prime1
} }
View File
@ -320,10 +320,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV // Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
@ -617,10 +613,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV // Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
@ -897,10 +889,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV // Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
@ -1152,10 +1140,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Requires: SSE // Requires: SSE
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
@ -1389,8 +1373,7 @@ loop_finished:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET
@ -1402,8 +1385,7 @@ error_match_off_too_big:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET
@ -1747,8 +1729,7 @@ loop_finished:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET
@ -1760,8 +1741,7 @@ error_match_off_too_big:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET
View File
@ -36,9 +36,6 @@ const forcePreDef = false
// zstdMinMatch is the minimum zstd match length. // zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3 const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
// fcsUnknown is used for unknown frame content size. // fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64 const fcsUnknown = math.MaxUint64
@ -110,26 +107,25 @@ func printf(format string, a ...interface{}) {
} }
} }
// matchLen returns the maximum length. // matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two. // a must be the shortest of the two.
// The function also returns whether all bytes matched. func matchLen(a, b []byte) (n int) {
func matchLen(a, b []byte) int { for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
b = b[:len(a)] diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
for i := 0; i < len(a)-7; i += 8 { if diff != 0 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 { return n + bits.TrailingZeros64(diff)>>3
return i + (bits.TrailingZeros64(diff) >> 3)
} }
n += 8
} }
checked := (len(a) >> 3) << 3
a = a[checked:]
b = b[checked:]
for i := range a { for i := range a {
if a[i] != b[i] { if a[i] != b[i] {
return i + checked break
} }
n++
} }
return len(a) + checked return n
} }
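The rewritten matchLen above counts the common prefix eight bytes at a time, using the trailing-zero count of an XOR to find the first differing byte. A self-contained sketch of the same technique (commonPrefixLen is a hypothetical standalone helper, not the package's unexported function):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefixLen compares 8 bytes at a time; when a word differs, the
// trailing zero count of the XOR locates the first mismatching byte.
func commonPrefixLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(commonPrefixLen([]byte("abcdefgh12"), []byte("abcdefgh13"))) // 9
}
```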
func load3232(b []byte, i int32) uint32 { func load3232(b []byte, i int32) uint32 {
@ -140,10 +136,6 @@ func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:]) return binary.LittleEndian.Uint64(b[i:])
} }
func load64(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
type byter interface { type byter interface {
Bytes() []byte Bytes() []byte
Len() int Len() int
View File
@ -23,3 +23,6 @@ cmd/xb/xb
# default compression test file # default compression test file
enwik8* enwik8*
# file generated by example
example.xz
View File
@ -1,4 +1,4 @@
Copyright (c) 2014-2021 Ulrich Kunitz Copyright (c) 2014-2022 Ulrich Kunitz
All rights reserved. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
View File
@ -53,6 +53,10 @@ func main() {
} }
``` ```
## Documentation
You can find the full documentation at [pkg.go.dev](https://pkg.go.dev/github.com/ulikunitz/xz).
## Using the gxz compression tool ## Using the gxz compression tool
The package includes a gxz command line utility for compression and The package includes a gxz command line utility for compression and
View File
@ -86,11 +86,20 @@
## Log ## Log
### 2022-12-12
Matt Dantay (@bodgit) reported an issue with the LZMA reader: the implementation
returned an error if the dictionary size was less than 4096 bytes, although the
recommendation is to treat the actual window size as 4096 bytes in that case.
The report came in as pull request
[#52](https://github.com/ulikunitz/xz/pull/52). The new patch v0.5.11 fixes it.
### 2021-02-02 ### 2021-02-02
Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The
function allocated a slice of records immediately after reading the value function allocated a slice of records immediately after reading the value
without further checks. Since the number has been too large the make function without further checks. Since the number has been too large the make function
did panic. The fix is to check the number against the expected number of records did panic. The fix is to check the number against the expected number of records
before allocating the records. before allocating the records.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
@ -31,8 +31,7 @@ import (
// printed. There is no control over the order of the items printed and // printed. There is no control over the order of the items printed and
// the format. The full format is: // the format. The full format is:
// //
// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message // 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
//
const ( const (
Ldate = 1 << iota // the date: 2009-01-23 Ldate = 1 << iota // the date: 2009-01-23
Ltime // the time: 01:23:23 Ltime // the time: 01:23:23
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
View File
@ -1,4 +1,4 @@
// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. // Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
Some files were not shown because too many files have changed in this diff.