Mirror of https://github.com/containers/skopeo.git (synced 2025-06-28 07:37:41 +00:00)

Merge pull request #1809 from containers/renovate/github.com-containers-storage-1.x

fix(deps): update module github.com/containers/storage to v1.44.0

Commit e62fcca5ed
go.mod (10 changed lines)

@@ -6,7 +6,7 @@ require (
 	github.com/containers/common v0.50.1
 	github.com/containers/image/v5 v5.23.1-0.20221019175208-1dd254487708
 	github.com/containers/ocicrypt v1.1.6
-	github.com/containers/storage v1.43.0
+	github.com/containers/storage v1.44.0
 	github.com/docker/distribution v2.8.1+incompatible
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0-rc2
@@ -22,13 +22,13 @@ require (
 )
 
 require (
-	github.com/BurntSushi/toml v1.2.0 // indirect
+	github.com/BurntSushi/toml v1.2.1 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/Microsoft/hcsshim v0.9.4 // indirect
+	github.com/Microsoft/hcsshim v0.9.5 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/containerd/cgroups v1.0.4 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.12.1 // indirect
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -50,7 +50,7 @@ require (
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.15.11 // indirect
+	github.com/klauspost/compress v1.15.12 // indirect
 	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
 	github.com/kr/pretty v0.3.0 // indirect
 	github.com/kr/text v0.2.0 // indirect
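The go.mod bump above only changes the manifest; the vendored sources follow below. To confirm which containers/storage version actually ended up in a built binary, the Go runtime's embedded build info can be inspected. A minimal sketch, assuming it is compiled into a program that (like skopeo) depends on containers/storage:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// ReadBuildInfo reports the module versions compiled into this binary.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info embedded")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "github.com/containers/storage" {
			fmt.Println(dep.Path, dep.Version) // expect v1.44.0 after this PR
		}
	}
}
```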
go.sum (15 changed lines)

@@ -169,8 +169,9 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
 github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@@ -207,8 +208,9 @@ github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
 github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
 github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.5 h1:AbV+VPfTrIVffukazHcpxmz/sRiE6YaMDzHWR9BXZHo=
+github.com/Microsoft/hcsshim v0.9.5/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -451,8 +453,9 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
 github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
+github.com/containerd/stargz-snapshotter/estargz v0.12.1 h1:+7nYmHJb0tEkcRaAW+MHqoKaJYZmkikupxCqVtmPuY0=
+github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -493,8 +496,9 @@ github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pA
 github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
 github.com/containers/ocicrypt v1.1.6 h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI=
 github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
-github.com/containers/storage v1.43.0 h1:P+zulGXA3mqe2GnYmZU0xu87Wy1M0PVHM2ucrgmvTdU=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
+github.com/containers/storage v1.44.0 h1:xCFhwILjjU+Hg133d07EVCgaaXn0ileGcVsAUcV8tDY=
+github.com/containers/storage v1.44.0/go.mod h1:HSfx7vUXwKPatPMqhgMw3mI3c3ijIJPZV5O0sj/mVxI=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -1167,8 +1171,9 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
 github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
 github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0=
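Each bumped module swaps two go.sum entries: the h1: line is the dirhash of the module's whole file tree, and the /go.mod line hashes only its go.mod file (and is kept even for versions that are no longer built, so module-graph verification still works). If you want to check an h1: value by hand, golang.org/x/mod exposes the same hash function. A sketch, assuming a locally extracted copy of the module at a hypothetical path:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashDir computes the same "h1:" dirhash that `go mod download` records
	// in go.sum; the prefix must be the module path at its version.
	h, err := dirhash.HashDir(
		"/tmp/containers-storage-v1.44.0",       // hypothetical extracted module
		"github.com/containers/storage@v1.44.0", // required module@version prefix
		dirhash.Hash1,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // should match the h1: line added above
}
```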
vendor/github.com/BurntSushi/toml/decode.go (generated, vendored; 42 changed lines)

@@ -21,7 +21,9 @@ type Unmarshaler interface {
 	UnmarshalTOML(interface{}) error
 }
 
-// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
+// Unmarshal decodes the contents of data in TOML format into a pointer v.
+//
+// See [Decoder] for a description of the decoding process.
 func Unmarshal(data []byte, v interface{}) error {
 	_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
 	return err
@@ -29,13 +31,12 @@ func Unmarshal(data []byte, v interface{}) error {
 
 // Decode the TOML data in to the pointer v.
 //
-// See the documentation on Decoder for a description of the decoding process.
+// See [Decoder] for a description of the decoding process.
 func Decode(data string, v interface{}) (MetaData, error) {
 	return NewDecoder(strings.NewReader(data)).Decode(v)
 }
 
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at path and decode it for you.
+// DecodeFile reads the contents of a file and decodes it with [Decode].
 func DecodeFile(path string, v interface{}) (MetaData, error) {
 	fp, err := os.Open(path)
 	if err != nil {
@@ -48,7 +49,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
 // Primitive is a TOML value that hasn't been decoded into a Go value.
 //
 // This type can be used for any value, which will cause decoding to be delayed.
-// You can use the PrimitiveDecode() function to "manually" decode these values.
+// You can use [PrimitiveDecode] to "manually" decode these values.
 //
 // NOTE: The underlying representation of a `Primitive` value is subject to
 // change. Do not rely on it.
@@ -70,15 +71,15 @@ const (
 
 // Decoder decodes TOML data.
 //
-// TOML tables correspond to Go structs or maps (dealer's choice – they can be
-// used interchangeably).
+// TOML tables correspond to Go structs or maps; they can be used
+// interchangeably, but structs offer better type safety.
 //
 // TOML table arrays correspond to either a slice of structs or a slice of maps.
 //
-// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
-// in the local timezone.
+// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
+// local timezone.
 //
-// time.Duration types are treated as nanoseconds if the TOML value is an
+// [time.Duration] types are treated as nanoseconds if the TOML value is an
 // integer, or they're parsed with time.ParseDuration() if they're strings.
 //
 // All other TOML types (float, string, int, bool and array) correspond to the
@@ -90,7 +91,7 @@ const (
 // UnmarshalText method. See the Unmarshaler example for a demonstration with
 // email addresses.
 //
-// Key mapping
+// ### Key mapping
 //
 // TOML keys can map to either keys in a Go map or field names in a Go struct.
 // The special `toml` struct tag can be used to map TOML keys to struct fields
@@ -168,17 +169,16 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
 	return md, md.unify(p.mapping, rv)
 }
 
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
+// PrimitiveDecode is just like the other Decode* functions, except it decodes a
+// TOML value that has already been parsed. Valid primitive values can *only* be
+// obtained from values filled by the decoder functions, including this method.
+// (i.e., v may contain more [Primitive] values.)
 //
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
+// Meta data for primitive values is included in the meta data returned by the
+// Decode* functions with one exception: keys returned by the Undecoded method
+// will only reflect keys that were decoded. Namely, any keys hidden behind a
+// Primitive will be considered undecoded. Executing this method will update the
+// undecoded keys in the meta data. (See the example.)
 func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
 	md.context = primValue.context
 	defer func() { md.context = nil }()
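Most of the decode.go churn is doc comments rewritten for the Go 1.19 doc-link syntax, where a bracketed identifier such as [Decoder] or [time.Time] renders as a hyperlink in go doc; the documented entry points themselves are unchanged. For orientation, a minimal usage sketch of toml.Decode (the struct and document are invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Name string
		Port int
	}
	// Decode parses a TOML document held in a string; DecodeFile and
	// DecodeFS are the file-based variants touched in this diff.
	if _, err := toml.Decode("name = \"skopeo\"\nport = 8080\n", &conf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(conf.Name, conf.Port) // skopeo 8080
}
```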
vendor/github.com/BurntSushi/toml/decode_go116.go (generated, vendored; 4 changed lines)

@@ -7,8 +7,8 @@ import (
 	"io/fs"
 )
 
-// DecodeFS is just like Decode, except it will automatically read the contents
-// of the file at `path` from a fs.FS instance.
+// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
+// [Decode].
 func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
 	fp, err := fsys.Open(path)
 	if err != nil {
vendor/github.com/BurntSushi/toml/doc.go (generated, vendored; 22 changed lines)

@@ -1,13 +1,11 @@
-/*
-Package toml implements decoding and encoding of TOML files.
-
-This package supports TOML v1.0.0, as listed on https://toml.io
-
-There is also support for delaying decoding with the Primitive type, and
-querying the set of keys in a TOML document with the MetaData type.
-
-The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
-and can be used to verify if TOML document is valid. It can also be used to
-print the type of each key.
-*/
+// Package toml implements decoding and encoding of TOML files.
+//
+// This package supports TOML v1.0.0, as specified at https://toml.io
+//
+// There is also support for delaying decoding with the Primitive type, and
+// querying the set of keys in a TOML document with the MetaData type.
+//
+// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+// and can be used to verify if TOML document is valid. It can also be used to
+// print the type of each key.
 package toml
vendor/github.com/BurntSushi/toml/encode.go (generated, vendored; 40 changed lines)

@@ -79,12 +79,12 @@ type Marshaler interface {
 // Encoder encodes a Go to a TOML document.
 //
 // The mapping between Go values and TOML values should be precisely the same as
-// for the Decode* functions.
+// for [Decode].
 //
 // time.Time is encoded as a RFC 3339 string, and time.Duration as its string
 // representation.
 //
-// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
+// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
 // encoding the value as custom TOML.
 //
 // If you want to write arbitrary binary data then you will need to use
@@ -130,7 +130,7 @@ func NewEncoder(w io.Writer) *Encoder {
 	}
 }
 
-// Encode writes a TOML representation of the Go value to the Encoder's writer.
+// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
 //
 // An error is returned if the value given cannot be encoded to a valid TOML
 // document.
@@ -261,7 +261,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
 			enc.eElement(reflect.ValueOf(v))
 			return
 		}
-		encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
+		encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n))
 	}
 
 	switch rv.Kind() {
@@ -504,7 +504,8 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
 		if opts.name != "" {
 			keyName = opts.name
 		}
-		if opts.omitempty && isEmpty(fieldVal) {
+
+		if opts.omitempty && enc.isEmpty(fieldVal) {
 			continue
 		}
 		if opts.omitzero && isZero(fieldVal) {
@@ -648,12 +649,26 @@ func isZero(rv reflect.Value) bool {
 	return false
 }
 
-func isEmpty(rv reflect.Value) bool {
+func (enc *Encoder) isEmpty(rv reflect.Value) bool {
 	switch rv.Kind() {
 	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
 		return rv.Len() == 0
 	case reflect.Struct:
-		return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+		if rv.Type().Comparable() {
+			return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+		}
+		// Need to also check if all the fields are empty, otherwise something
+		// like this with uncomparable types will always return true:
+		//
+		//   type a struct{ field b }
+		//   type b struct{ s []string }
+		//   s := a{field: b{s: []string{"AAA"}}}
+		for i := 0; i < rv.NumField(); i++ {
+			if !enc.isEmpty(rv.Field(i)) {
+				return false
+			}
+		}
+		return true
 	case reflect.Bool:
 		return !rv.Bool()
 	}
@@ -668,16 +683,15 @@ func (enc *Encoder) newline() {
 
 // Write a key/value pair:
 //
 //	key = <any value>
 //
 // This is also used for "k = v" in inline tables; so something like this will
 // be written in three calls:
 //
-//	┌────────────────────┐
-//	│  ┌───┐  ┌─────┐│
-//	v  v   v  v     vv
-//	key = {k = v, k2 = v2}
-//
+//	┌───────────────────┐
+//	│  ┌───┐  ┌────┐│
+//	v  v   v  v    vv
+//	key = {k = 1, k2 = 2}
 func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
 	if len(key) == 0 {
 		encPanic(errNoKey)
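The substantive change here is the rv.Type().Comparable() guard in isEmpty: comparing interface values whose dynamic type contains a slice, map, or function panics at runtime, so omitempty on a struct with such fields could crash the encoder. A small standalone sketch of the failure mode the guard avoids, using a simplified variant of the fixed logic (types copied from the comment in the hunk above):

```go
package main

import (
	"fmt"
	"reflect"
)

type b struct{ s []string }
type a struct{ field b }

// isEmptyStruct mirrors the shape of the fix: compare only comparable types,
// otherwise recurse field by field (simplified; the real code handles more kinds).
func isEmptyStruct(rv reflect.Value) bool {
	if rv.Type().Comparable() {
		return reflect.Zero(rv.Type()).Interface() == rv.Interface()
	}
	for i := 0; i < rv.NumField(); i++ {
		f := rv.Field(i)
		switch f.Kind() {
		case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
			if f.Len() != 0 {
				return false
			}
		case reflect.Struct:
			if !isEmptyStruct(f) {
				return false
			}
		}
	}
	return true
}

func main() {
	v := reflect.ValueOf(a{field: b{s: []string{"AAA"}}})
	// a transitively contains a slice, so == on it would panic with
	// "comparing uncomparable type main.a" in the pre-v1.2.1 code.
	fmt.Println(v.Type().Comparable()) // false
	fmt.Println(isEmptyStruct(v))      // false, and no panic
}
```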
vendor/github.com/BurntSushi/toml/error.go (generated, vendored; 67 changed lines)

@@ -5,57 +5,60 @@ import (
 	"strings"
 )
 
-// ParseError is returned when there is an error parsing the TOML syntax.
-//
-// For example invalid syntax, duplicate keys, etc.
+// ParseError is returned when there is an error parsing the TOML syntax such as
+// invalid syntax, duplicate keys, etc.
 //
 // In addition to the error message itself, you can also print detailed location
-// information with context by using ErrorWithPosition():
+// information with context by using [ErrorWithPosition]:
 //
 //	toml: error: Key 'fruit' was already created and cannot be used as an array.
 //
 //	At line 4, column 2-7:
 //
 //	      2 | fruit = []
 //	      3 |
 //	      4 | [[fruit]] # Not allowed
 //	            ^^^^^
 //
-// Furthermore, the ErrorWithUsage() can be used to print the above with some
-// more detailed usage guidance:
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
 //
 //	toml: error: newlines not allowed within inline tables
 //
 //	At line 1, column 18:
 //
 //	    1 | x = [{ key = 42 #
 //	                        ^
 //
 //	Error help:
 //
 //	  Inline tables must always be on a single line:
 //
 //	      table = {key = 42, second = 43}
 //
 //	  It is invalid to split them over multiple lines like so:
 //
 //	      # INVALID
 //	      table = {
 //	          key = 42,
 //	          second = 43
 //	      }
 //
 //	  Use regular for this:
 //
 //	      [table]
 //	      key = 42
 //	      second = 43
 type ParseError struct {
 	Message  string   // Short technical message.
 	Usage    string   // Longer message with usage guidance; may be blank.
 	Position Position // Position of the error
 	LastKey  string   // Last parsed key, may be blank.
-	Line     int      // Line the error occurred. Deprecated: use Position.
+
+	// Line the error occurred.
+	//
+	// Deprecated: use [Position].
+	Line int
 
 	err   error
 	input string
@@ -83,7 +86,7 @@ func (pe ParseError) Error() string {
 
 // ErrorWithUsage() returns the error with detailed location context.
 //
-// See the documentation on ParseError.
+// See the documentation on [ParseError].
 func (pe ParseError) ErrorWithPosition() string {
 	if pe.input == "" { // Should never happen, but just in case.
 		return pe.Error()
@@ -124,7 +127,7 @@ func (pe ParseError) ErrorWithPosition() string {
 // ErrorWithUsage() returns the error with detailed location context and usage
 // guidance.
 //
-// See the documentation on ParseError.
+// See the documentation on [ParseError].
 func (pe ParseError) ErrorWithUsage() string {
 	m := pe.ErrorWithPosition()
 	if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
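These hunks again switch the doc comments to bracketed doc links and restructure the deprecated Line field. For orientation, this is roughly how a caller reaches ErrorWithPosition; a sketch with errors.As, using the same invalid document as the comment above:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	_, err := toml.Decode("fruit = []\n\n[[fruit]] # Not allowed\n", &v)

	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Prints the error with the line/column context shown in the
		// ParseError docs, rather than just the one-line message.
		fmt.Println(perr.ErrorWithPosition())
	}
}
```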
vendor/github.com/BurntSushi/toml/lex.go (generated, vendored; 2 changed lines)

@@ -771,7 +771,7 @@ func lexRawString(lx *lexer) stateFn {
 }
 
 // lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
+// a string. It assumes that the beginning ''' has already been consumed and
 // ignored.
 func lexMultilineRawString(lx *lexer) stateFn {
 	r := lx.next()
vendor/github.com/BurntSushi/toml/meta.go (generated, vendored; 4 changed lines)

@@ -71,7 +71,7 @@ func (md *MetaData) Keys() []Key {
 // Undecoded returns all keys that have not been decoded in the order in which
 // they appear in the original TOML document.
 //
-// This includes keys that haven't been decoded because of a Primitive value.
+// This includes keys that haven't been decoded because of a [Primitive] value.
 // Once the Primitive value is decoded, the keys will be considered decoded.
 //
 // Also note that decoding into an empty interface will result in no decoding,
@@ -89,7 +89,7 @@ func (md *MetaData) Undecoded() []Key {
 	return undecoded
 }
 
-// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get
 // values of this type.
 type Key []string
 
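Undecoded, mentioned in both hunks, reports the keys the target value had no home for. A minimal sketch (struct and document invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Name string
	}
	// "port" has no matching struct field, so it remains undecoded.
	meta, err := toml.Decode("name = \"skopeo\"\nport = 8080\n", &conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(meta.Undecoded()) // one undecoded key: port
}
```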
vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go (generated, vendored; 6 changed lines)

@@ -154,7 +154,7 @@ func (e *HcsError) Error() string {
 
 func (e *HcsError) Temporary() bool {
 	err, ok := e.Err.(net.Error)
-	return ok && err.Temporary()
+	return ok && err.Temporary() //nolint:staticcheck
 }
 
 func (e *HcsError) Timeout() bool {
@@ -193,7 +193,7 @@ func (e *SystemError) Error() string {
 
 func (e *SystemError) Temporary() bool {
	err, ok := e.Err.(net.Error)
-	return ok && err.Temporary()
+	return ok && err.Temporary() //nolint:staticcheck
 }
 
 func (e *SystemError) Timeout() bool {
@@ -224,7 +224,7 @@ func (e *ProcessError) Error() string {
 
 func (e *ProcessError) Temporary() bool {
 	err, ok := e.Err.(net.Error)
-	return ok && err.Temporary()
+	return ok && err.Temporary() //nolint:staticcheck
 }
 
 func (e *ProcessError) Timeout() bool {
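net.Error.Temporary was deprecated in Go 1.18 because "temporary" was never well defined, so staticcheck (SA1019) now flags these call sites; the vendored code keeps the behavior and suppresses the lint instead of changing semantics. The same pattern in isolation, as a sketch:

```go
package main

import (
	"errors"
	"net"
)

// isTemporary preserves the legacy behavior while acknowledging the
// deprecation, exactly as the three methods above do.
func isTemporary(err error) bool {
	var ne net.Error
	return errors.As(err, &ne) && ne.Temporary() //nolint:staticcheck
}

func main() {
	_ = isTemporary(errors.New("example"))
}
```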
vendor/github.com/containers/storage/.cirrus.yml (generated, vendored; 21 changed lines)

@@ -18,7 +18,6 @@ env:
     #### Cache-image names to test with (double-quotes around names are critical)
     ###
     FEDORA_NAME: "fedora-36"
-    PRIOR_FEDORA_NAME: "fedora-35"
     UBUNTU_NAME: "ubuntu-2204"
 
     # GCE project where images live
@@ -26,7 +25,6 @@ env:
     # VM Image built in containers/automation_images
     IMAGE_SUFFIX: "c5878804328480768"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
-    PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
 
     ####
@@ -90,15 +88,6 @@ fedora_testing_task: &fedora_testing
     journal_log_script: '${_JOURNALCMD} || true'
 
 
-prior_fedora_testing_task:
-    <<: *fedora_testing
-    alias: prior_fedora_testing
-    name: *std_test_name
-    env:
-        OS_NAME: "${PRIOR_FEDORA_NAME}"
-        VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-
-
 ubuntu_testing_task: &ubuntu_testing
     <<: *fedora_testing
     alias: ubuntu_testing
@@ -117,7 +106,7 @@ lint_task:
     env:
         CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
     container:
-        image: golang:1.16
+        image: golang:1.17
     modules_cache:
         fingerprint_script: cat go.sum
         folder: $GOPATH/pkg/mod
@@ -125,7 +114,7 @@ lint_task:
         echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list
         apt-get update
         apt-get install -y libbtrfs-dev libdevmapper-dev
-    test_script: make lint
+    test_script: make local-validate && make lint
 
 
 # Update metadata on VM images referenced by this repository state
@@ -140,7 +129,6 @@ meta_task:
         # Space-separated list of images used by this repository state
         IMGNAMES: |-
             ${FEDORA_CACHE_IMAGE_NAME}
-            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
             ${UBUNTU_CACHE_IMAGE_NAME}
         BUILDID: "${CIRRUS_BUILD_ID}"
         REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
@@ -154,7 +142,7 @@ meta_task:
 
 vendor_task:
     container:
-        image: golang:1.16
+        image: golang:1.17
     modules_cache:
         fingerprint_script: cat go.sum
         folder: $GOPATH/pkg/mod
@@ -167,11 +155,10 @@ success_task:
     depends_on:
         - lint
         - fedora_testing
-        - prior_fedora_testing
         - ubuntu_testing
         - meta
         - vendor
     container:
-        image: golang:1.16
+        image: golang:1.17
     clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
     script: /bin/true
vendor/github.com/containers/storage/Makefile (generated, vendored; 4 changed lines)

@@ -92,7 +92,7 @@ local-test-integration: local-binary ## run the integration tests on the host (r
 test-integration: local-binary ## run the integration tests using VMs
 	$(RUNINVM) $(MAKE) local-$@
 
-local-validate: ## validate DCO and gofmt on the host
+local-validate: install.tools ## validate DCO and gofmt on the host
 	@./hack/git-validation.sh
 	@./hack/gofmt.sh
 
@@ -120,6 +120,6 @@ vendor-in-container:
 	podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor
 
 vendor:
-	$(GO) mod tidy
+	$(GO) mod tidy -compat=1.17
 	$(GO) mod vendor
 	$(GO) mod verify
vendor/github.com/containers/storage/VERSION (generated, vendored; 2 changed lines)

@@ -1 +1 @@
-1.43.0
+1.44.0
|
303
vendor/github.com/containers/storage/containers.go
generated
vendored
303
vendor/github.com/containers/storage/containers.go
generated
vendored
@ -66,12 +66,25 @@ type Container struct {
|
|||||||
Flags map[string]interface{} `json:"flags,omitempty"`
|
Flags map[string]interface{} `json:"flags,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContainerStore provides bookkeeping for information about Containers.
|
// rwContainerStore provides bookkeeping for information about Containers.
|
||||||
type ContainerStore interface {
|
type rwContainerStore interface {
|
||||||
FileBasedStore
|
metadataStore
|
||||||
MetadataStore
|
containerBigDataStore
|
||||||
ContainerBigDataStore
|
flaggableStore
|
||||||
FlaggableStore
|
|
||||||
|
// startWriting makes sure the store is fresh, and locks it for writing.
|
||||||
|
// If this succeeds, the caller MUST call stopWriting().
|
||||||
|
startWriting() error
|
||||||
|
|
||||||
|
// stopWriting releases locks obtained by startWriting.
|
||||||
|
stopWriting()
|
||||||
|
|
||||||
|
// startReading makes sure the store is fresh, and locks it for reading.
|
||||||
|
// If this succeeds, the caller MUST call stopReading().
|
||||||
|
startReading() error
|
||||||
|
|
||||||
|
// stopReading releases locks obtained by startReading.
|
||||||
|
stopReading()
|
||||||
|
|
||||||
// Create creates a container that has a specified ID (or generates a
|
// Create creates a container that has a specified ID (or generates a
|
||||||
// random one if an empty value is supplied) and optional names,
|
// random one if an empty value is supplied) and optional names,
|
||||||
@ -81,18 +94,8 @@ type ContainerStore interface {
|
|||||||
// convenience of the caller, nothing more.
|
// convenience of the caller, nothing more.
|
||||||
Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
|
Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
|
||||||
|
|
||||||
// SetNames updates the list of names associated with the container
|
// updateNames modifies names associated with a container based on (op, names).
|
||||||
// with the specified ID.
|
updateNames(id string, names []string, op updateNameOperation) error
|
||||||
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
|
|
||||||
SetNames(id string, names []string) error
|
|
||||||
|
|
||||||
// AddNames adds the supplied values to the list of names associated with the container with
|
|
||||||
// the specified id.
|
|
||||||
AddNames(id string, names []string) error
|
|
||||||
|
|
||||||
// RemoveNames removes the supplied values from the list of names associated with the container with
|
|
||||||
// the specified id.
|
|
||||||
RemoveNames(id string, names []string) error
|
|
||||||
|
|
||||||
// Get retrieves information about a container given an ID or name.
|
// Get retrieves information about a container given an ID or name.
|
||||||
Get(id string) (*Container, error)
|
Get(id string) (*Container, error)
|
||||||
@ -157,12 +160,12 @@ func (c *Container) ProcessLabel() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *Container) MountOpts() []string {
|
func (c *Container) MountOpts() []string {
|
||||||
switch c.Flags[mountOptsFlag].(type) {
|
switch value := c.Flags[mountOptsFlag].(type) {
|
||||||
case []string:
|
case []string:
|
||||||
return c.Flags[mountOptsFlag].([]string)
|
return value
|
||||||
case []interface{}:
|
case []interface{}:
|
||||||
var mountOpts []string
|
var mountOpts []string
|
||||||
for _, v := range c.Flags[mountOptsFlag].([]interface{}) {
|
for _, v := range value {
|
||||||
if flag, ok := v.(string); ok {
|
if flag, ok := v.(string); ok {
|
||||||
mountOpts = append(mountOpts, flag)
|
mountOpts = append(mountOpts, flag)
|
||||||
}
|
}
|
||||||
@ -173,6 +176,80 @@ func (c *Container) MountOpts() []string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
|
||||||
|
// If this succeeds, the caller MUST call stopWriting().
|
||||||
|
//
|
||||||
|
// This is an internal implementation detail of containerStore construction, every other caller
|
||||||
|
// should use startWriting() instead.
|
||||||
|
func (r *containerStore) startWritingWithReload(canReload bool) error {
|
||||||
|
r.lockfile.Lock()
|
||||||
|
succeeded := false
|
||||||
|
defer func() {
|
||||||
|
if !succeeded {
|
||||||
|
r.lockfile.Unlock()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if canReload {
|
||||||
|
if err := r.reloadIfChanged(true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
succeeded = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// startWriting makes sure the store is fresh, and locks it for writing.
|
||||||
|
// If this succeeds, the caller MUST call stopWriting().
|
||||||
|
func (r *containerStore) startWriting() error {
|
||||||
|
return r.startWritingWithReload(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stopWriting releases locks obtained by startWriting.
|
||||||
|
func (r *containerStore) stopWriting() {
|
||||||
|
r.lockfile.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// startReading makes sure the store is fresh, and locks it for reading.
|
||||||
|
// If this succeeds, the caller MUST call stopReading().
|
||||||
|
func (r *containerStore) startReading() error {
|
||||||
|
r.lockfile.RLock()
|
||||||
|
succeeded := false
|
||||||
|
defer func() {
|
||||||
|
if !succeeded {
|
||||||
|
r.lockfile.Unlock()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := r.reloadIfChanged(false); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
succeeded = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// stopReading releases locks obtained by startReading.
|
||||||
|
func (r *containerStore) stopReading() {
|
||||||
|
r.lockfile.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// reloadIfChanged reloads the contents of the store from disk if it is changed.
|
||||||
|
//
|
||||||
|
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
|
||||||
|
// if it is held for writing.
|
||||||
|
func (r *containerStore) reloadIfChanged(lockedForWriting bool) error {
|
||||||
|
r.loadMut.Lock()
|
||||||
|
defer r.loadMut.Unlock()
|
||||||
|
|
||||||
|
modified, err := r.lockfile.Modified()
|
||||||
|
if err == nil && modified {
|
||||||
|
return r.load(lockedForWriting)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
func (r *containerStore) Containers() ([]Container, error) {
|
func (r *containerStore) Containers() ([]Container, error) {
|
||||||
containers := make([]Container, len(r.containers))
|
containers := make([]Container, len(r.containers))
|
||||||
for i := range r.containers {
|
for i := range r.containers {
|
||||||
@ -193,48 +270,60 @@ func (r *containerStore) datapath(id, key string) string {
|
|||||||
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
|
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *containerStore) Load() error {
|
// load reloads the contents of the store from disk.
|
||||||
|
//
|
||||||
|
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
|
||||||
|
// if it is held for writing.
|
||||||
|
func (r *containerStore) load(lockedForWriting bool) error {
|
||||||
needSave := false
|
needSave := false
|
||||||
rpath := r.containerspath()
|
rpath := r.containerspath()
|
||||||
data, err := os.ReadFile(rpath)
|
data, err := os.ReadFile(rpath)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
containers := []*Container{}
|
containers := []*Container{}
|
||||||
layers := make(map[string]*Container)
|
if len(data) != 0 {
|
||||||
idlist := []string{}
|
if err := json.Unmarshal(data, &containers); err != nil {
|
||||||
ids := make(map[string]*Container)
|
return fmt.Errorf("loading %q: %w", rpath, err)
|
||||||
names := make(map[string]*Container)
|
|
||||||
if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
|
|
||||||
idlist = make([]string, 0, len(containers))
|
|
||||||
for n, container := range containers {
|
|
||||||
idlist = append(idlist, container.ID)
|
|
||||||
ids[container.ID] = containers[n]
|
|
||||||
layers[container.LayerID] = containers[n]
|
|
||||||
for _, name := range container.Names {
|
|
||||||
if conflict, ok := names[name]; ok {
|
|
||||||
r.removeName(conflict, name)
|
|
||||||
needSave = true
|
|
||||||
}
|
|
||||||
names[name] = containers[n]
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
idlist := make([]string, 0, len(containers))
|
||||||
|
layers := make(map[string]*Container)
|
||||||
|
ids := make(map[string]*Container)
|
||||||
|
names := make(map[string]*Container)
|
||||||
|
for n, container := range containers {
|
||||||
|
idlist = append(idlist, container.ID)
|
||||||
|
ids[container.ID] = containers[n]
|
||||||
|
layers[container.LayerID] = containers[n]
|
||||||
|
for _, name := range container.Names {
|
||||||
|
if conflict, ok := names[name]; ok {
|
||||||
|
r.removeName(conflict, name)
|
||||||
|
needSave = true
|
||||||
|
}
|
||||||
|
names[name] = containers[n]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
r.containers = containers
|
r.containers = containers
|
||||||
r.idindex = truncindex.NewTruncIndex(idlist)
|
r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
|
||||||
r.byid = ids
|
r.byid = ids
|
||||||
r.bylayer = layers
|
r.bylayer = layers
|
||||||
r.byname = names
|
r.byname = names
|
||||||
if needSave {
|
if needSave {
|
||||||
|
if !lockedForWriting {
|
||||||
|
// Eventually, the callers should be modified to retry with a write lock, instead.
|
||||||
|
return errors.New("container store is inconsistent and the current caller does not hold a write lock")
|
||||||
|
}
|
||||||
return r.Save()
|
return r.Save()
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Save saves the contents of the store to disk. It should be called with
|
||||||
|
// the lock held, locked for writing.
|
||||||
func (r *containerStore) Save() error {
|
func (r *containerStore) Save() error {
|
||||||
if !r.Locked() {
|
r.lockfile.AssertLockedForWriting()
|
||||||
return errors.New("container store is not locked")
|
|
||||||
}
|
|
||||||
rpath := r.containerspath()
|
rpath := r.containerspath()
|
||||||
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
|
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -243,11 +332,13 @@ func (r *containerStore) Save() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer r.Touch()
|
if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
|
||||||
return ioutils.AtomicWriteFile(rpath, jdata, 0600)
|
return err
|
||||||
|
}
|
||||||
|
return r.lockfile.Touch()
|
||||||
}
|
}
|
||||||
|
|
||||||
func newContainerStore(dir string) (ContainerStore, error) {
|
func newContainerStore(dir string) (rwContainerStore, error) {
|
||||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -255,8 +346,6 @@ func newContainerStore(dir string) (ContainerStore, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
lockfile.Lock()
|
|
||||||
defer lockfile.Unlock()
|
|
||||||
cstore := containerStore{
|
cstore := containerStore{
|
||||||
lockfile: lockfile,
|
lockfile: lockfile,
|
||||||
dir: dir,
|
dir: dir,
|
||||||
@ -265,7 +354,11 @@ func newContainerStore(dir string) (ContainerStore, error) {
|
|||||||
bylayer: make(map[string]*Container),
|
bylayer: make(map[string]*Container),
|
||||||
byname: make(map[string]*Container),
|
byname: make(map[string]*Container),
|
||||||
}
|
}
|
||||||
if err := cstore.Load(); err != nil {
|
if err := cstore.startWritingWithReload(false); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer cstore.stopWriting()
|
||||||
|
if err := cstore.load(true); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &cstore, nil
|
return &cstore, nil
|
||||||
@ -337,31 +430,31 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
|
|||||||
if err := hasOverlappingRanges(options.GIDMap); err != nil {
|
if err := hasOverlappingRanges(options.GIDMap); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err == nil {
|
container = &Container{
|
||||||
container = &Container{
|
ID: id,
|
||||||
ID: id,
|
Names: names,
|
||||||
Names: names,
|
ImageID: image,
|
||||||
ImageID: image,
|
LayerID: layer,
|
||||||
LayerID: layer,
|
Metadata: metadata,
|
||||||
Metadata: metadata,
|
BigDataNames: []string{},
|
||||||
BigDataNames: []string{},
|
BigDataSizes: make(map[string]int64),
|
||||||
BigDataSizes: make(map[string]int64),
|
BigDataDigests: make(map[string]digest.Digest),
|
||||||
BigDataDigests: make(map[string]digest.Digest),
|
Created: time.Now().UTC(),
|
||||||
Created: time.Now().UTC(),
|
Flags: copyStringInterfaceMap(options.Flags),
|
||||||
Flags: copyStringInterfaceMap(options.Flags),
|
UIDMap: copyIDMap(options.UIDMap),
|
||||||
UIDMap: copyIDMap(options.UIDMap),
|
GIDMap: copyIDMap(options.GIDMap),
|
||||||
GIDMap: copyIDMap(options.GIDMap),
|
|
||||||
}
|
|
||||||
r.containers = append(r.containers, container)
|
|
||||||
r.byid[id] = container
|
|
||||||
r.idindex.Add(id)
|
|
||||||
r.bylayer[layer] = container
|
|
||||||
for _, name := range names {
|
|
||||||
r.byname[name] = container
|
|
||||||
}
|
|
||||||
err = r.Save()
|
|
||||||
container = copyContainer(container)
|
|
||||||
}
|
}
|
||||||
|
r.containers = append(r.containers, container)
|
||||||
|
r.byid[id] = container
|
||||||
|
// This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
|
||||||
|
// Implementing recovery from an unlikely and unimportant failure here would be too risky.
|
||||||
|
_ = r.idindex.Add(id)
|
||||||
|
r.bylayer[layer] = container
|
||||||
|
for _, name := range names {
|
||||||
|
r.byname[name] = container
|
||||||
|
}
|
||||||
|
err = r.Save()
|
||||||
|
container = copyContainer(container)
|
||||||
return container, err
|
return container, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -384,19 +477,6 @@ func (r *containerStore) removeName(container *Container, name string) {
|
|||||||
container.Names = stringSliceWithoutValue(container.Names, name)
|
container.Names = stringSliceWithoutValue(container.Names, name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
|
|
||||||
func (r *containerStore) SetNames(id string, names []string) error {
|
|
||||||
return r.updateNames(id, names, setNames)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *containerStore) AddNames(id string, names []string) error {
|
|
||||||
return r.updateNames(id, names, addNames)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *containerStore) RemoveNames(id string, names []string) error {
|
|
||||||
return r.updateNames(id, names, removeNames)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error {
|
func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error {
|
||||||
container, ok := r.lookup(id)
|
container, ok := r.lookup(id)
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -434,7 +514,9 @@ func (r *containerStore) Delete(id string) error {
 		}
 	}
 	delete(r.byid, id)
-	r.idindex.Delete(id)
+	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
+	_ = r.idindex.Delete(id)
 	delete(r.bylayer, container.LayerID)
 	for _, name := range container.Names {
 		delete(r.byname, name)
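
The two `_ = r.idindex` changes above make a previously ignored return value explicit and document why no recovery is attempted. A minimal sketch of the same idiom, assuming a hypothetical in-memory index rather than the actual truncated-ID index c/storage uses:

```go
package main

import "fmt"

// idIndex is a hypothetical stand-in whose only failure modes are
// "already present" on Add and "already absent" on Delete.
type idIndex struct{ ids map[string]struct{} }

func (x *idIndex) Add(id string) error {
	if _, ok := x.ids[id]; ok {
		return fmt.Errorf("id %s already exists", id)
	}
	x.ids[id] = struct{}{}
	return nil
}

func (x *idIndex) Delete(id string) error {
	if _, ok := x.ids[id]; !ok {
		return fmt.Errorf("id %s not found", id)
	}
	delete(x.ids, id)
	return nil
}

func main() {
	idx := &idIndex{ids: map[string]struct{}{}}
	// As in the hunks above: the only failure mode leaves the index in the
	// desired state already, so the error is deliberately discarded.
	_ = idx.Add("layer-1")
	_ = idx.Delete("layer-1")
	fmt.Println(len(idx.ids)) // 0
}
```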
@@ -612,50 +694,3 @@ func (r *containerStore) Wipe() error {
 	}
 	return nil
 }
-
-func (r *containerStore) Lock() {
-	r.lockfile.Lock()
-}
-
-func (r *containerStore) RecursiveLock() {
-	r.lockfile.RecursiveLock()
-}
-
-func (r *containerStore) RLock() {
-	r.lockfile.RLock()
-}
-
-func (r *containerStore) Unlock() {
-	r.lockfile.Unlock()
-}
-
-func (r *containerStore) Touch() error {
-	return r.lockfile.Touch()
-}
-
-func (r *containerStore) Modified() (bool, error) {
-	return r.lockfile.Modified()
-}
-
-func (r *containerStore) IsReadWrite() bool {
-	return r.lockfile.IsReadWrite()
-}
-
-func (r *containerStore) TouchedSince(when time.Time) bool {
-	return r.lockfile.TouchedSince(when)
-}
-
-func (r *containerStore) Locked() bool {
-	return r.lockfile.Locked()
-}
-
-func (r *containerStore) ReloadIfChanged() error {
-	r.loadMut.Lock()
-	defer r.loadMut.Unlock()
-
-	modified, err := r.Modified()
-	if err == nil && modified {
-		return r.Load()
-	}
-	return err
-}
-
vendor/github.com/containers/storage/deprecated.go (generated, vendored, new file, 216 lines)
@@ -0,0 +1,216 @@
package storage

import (
	"io"
	"time"

	drivers "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/archive"
	digest "github.com/opencontainers/go-digest"
)

// The type definitions in this file exist ONLY to maintain formal API compatibility.
// DO NOT ADD ANY NEW METHODS TO THESE INTERFACES.

// ROFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROFileBasedStore interface {
	Locker
	Load() error
	ReloadIfChanged() error
}

// RWFileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWFileBasedStore interface {
	Save() error
}

// FileBasedStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type FileBasedStore interface {
	ROFileBasedStore
	RWFileBasedStore
}

// ROMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROMetadataStore interface {
	Metadata(id string) (string, error)
}

// RWMetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWMetadataStore interface {
	SetMetadata(id, metadata string) error
}

// MetadataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type MetadataStore interface {
	ROMetadataStore
	RWMetadataStore
}

// ROBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROBigDataStore interface {
	BigData(id, key string) ([]byte, error)
	BigDataSize(id, key string) (int64, error)
	BigDataDigest(id, key string) (digest.Digest, error)
	BigDataNames(id string) ([]string, error)
}

// RWImageBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWImageBigDataStore interface {
	SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}

// ContainerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ContainerBigDataStore interface {
	ROBigDataStore
	SetBigData(id, key string, data []byte) error
}

// ROLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROLayerBigDataStore interface {
	BigData(id, key string) (io.ReadCloser, error)
	BigDataNames(id string) ([]string, error)
}

// RWLayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type RWLayerBigDataStore interface {
	SetBigData(id, key string, data io.Reader) error
}

// LayerBigDataStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type LayerBigDataStore interface {
	ROLayerBigDataStore
	RWLayerBigDataStore
}

// FlaggableStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type FlaggableStore interface {
	ClearFlag(id string, flag string) error
	SetFlag(id string, flag string, value interface{}) error
}

// ContainerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ContainerStore interface {
	FileBasedStore
	MetadataStore
	ContainerBigDataStore
	FlaggableStore
	Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
	SetNames(id string, names []string) error
	AddNames(id string, names []string) error
	RemoveNames(id string, names []string) error
	Get(id string) (*Container, error)
	Exists(id string) bool
	Delete(id string) error
	Wipe() error
	Lookup(name string) (string, error)
	Containers() ([]Container, error)
}

// ROImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROImageStore interface {
	ROFileBasedStore
	ROMetadataStore
	ROBigDataStore
	Exists(id string) bool
	Get(id string) (*Image, error)
	Lookup(name string) (string, error)
	Images() ([]Image, error)
	ByDigest(d digest.Digest) ([]*Image, error)
}

// ImageStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ImageStore interface {
	ROImageStore
	RWFileBasedStore
	RWMetadataStore
	RWImageBigDataStore
	FlaggableStore
	Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
	SetNames(id string, names []string) error
	AddNames(id string, names []string) error
	RemoveNames(id string, names []string) error
	Delete(id string) error
	Wipe() error
}

// ROLayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type ROLayerStore interface {
	ROFileBasedStore
	ROMetadataStore
	ROLayerBigDataStore
	Exists(id string) bool
	Get(id string) (*Layer, error)
	Status() ([][2]string, error)
	Changes(from, to string) ([]archive.Change, error)
	Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
	DiffSize(from, to string) (int64, error)
	Size(name string) (int64, error)
	Lookup(name string) (string, error)
	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
	Layers() ([]Layer, error)
}

// LayerStore is a deprecated interface with no documented way to use it from callers outside of c/storage.
//
// Deprecated: There is no way to use this from any external user of c/storage to invoke c/storage functionality.
type LayerStore interface {
	ROLayerStore
	RWFileBasedStore
	RWMetadataStore
	FlaggableStore
	RWLayerBigDataStore
	Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
	CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
	Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
	SetNames(id string, names []string) error
	AddNames(id string, names []string) error
	RemoveNames(id string, names []string) error
	Delete(id string) error
	Wipe() error
	Mount(id string, options drivers.MountOpts) (string, error)
	Unmount(id string, force bool) (bool, error)
	Mounted(id string) (int, error)
	ParentOwners(id string) (uids, gids []int, err error)
	ApplyDiff(to string, diff io.Reader) (int64, error)
	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
	CleanupStagingDirectory(stagingDirectory string) error
	ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
	DifferTarget(id string) (string, error)
	LoadLocked() error
	PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
}
vendor/github.com/containers/storage/drivers/aufs/aufs.go (generated, vendored, 2 changes)
@@ -67,7 +67,7 @@ var (
 const defaultPerms = os.FileMode(0555)
 
 func init() {
-	graphdriver.Register("aufs", Init)
+	graphdriver.MustRegister("aufs", Init)
 }
 
 // Driver contains information about the filesystem mounted.
vendor/github.com/containers/storage/drivers/aufs/mount.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package aufs
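
This is the first of many hunks in this update that add the Go 1.17 `//go:build` expression above the pre-existing `// +build` line; the remaining build-tag hunks below follow the same shape. A sketch of the dual-constraint convention, reusing a tag name from the `register_*` files later in this diff:

```go
//go:build linux && !exclude_graphdriver_aufs
// +build linux,!exclude_graphdriver_aufs

// Both constraint lines above must agree: the //go:build form (Go 1.17+)
// uses && / || / !, while the legacy // +build form uses commas (AND) and
// spaces (OR). gofmt generates and keeps the legacy line in sync.
package register
```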
vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (generated, vendored, 2 changes)
@@ -42,7 +42,7 @@ import (
 const defaultPerms = os.FileMode(0555)
 
 func init() {
-	graphdriver.Register("btrfs", Init)
+	graphdriver.MustRegister("btrfs", Init)
 }
 
 type btrfsOptions struct {
vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux || !cgo
 // +build !linux !cgo
 
 package btrfs
vendor/github.com/containers/storage/drivers/btrfs/version.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && !btrfs_noversion && cgo
 // +build linux,!btrfs_noversion,cgo
 
 package btrfs
vendor/github.com/containers/storage/drivers/chown.go (generated, vendored, 6 changes)
@@ -115,7 +115,7 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
 // on-disk owner UIDs and GIDs which are "host" values in the first map with
 // UIDs and GIDs for "host" values from the second map which correspond to the
 // same "container" IDs.
-func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) (retErr error) {
 	driver := n.ProtoDriver
 	options := MountOpts{
 		MountLabel: mountLabel,
@@ -124,9 +124,7 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost
 	if err != nil {
 		return err
 	}
-	defer func() {
-		driver.Put(id)
-	}()
+	defer driverPut(driver, id, &retErr)
 
 	return ChownPathByMaps(layerFs, toContainer, toHost)
 }
vendor/github.com/containers/storage/drivers/chown_darwin.go (generated, vendored, 6 changes)
@@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
 		uid, gid = mappedPair.UID, mappedPair.GID
 	}
 	if uid != int(st.Uid) || gid != int(st.Gid) {
-		cap, err := system.Lgetxattr(path, "security.capability")
+		capability, err := system.Lgetxattr(path, "security.capability")
 		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
 			return fmt.Errorf("%s: %w", os.Args[0], err)
 		}
@@ -98,8 +98,8 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
 			return fmt.Errorf("%s: %w", os.Args[0], err)
 		}
 	}
-	if cap != nil {
-		if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
+	if capability != nil {
+		if err := system.Lsetxattr(path, "security.capability", capability, 0); err != nil {
 			return fmt.Errorf("%s: %w", os.Args[0], err)
 		}
 	}
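
The `cap` to `capability` rename above matters because `cap` is a Go builtin: shadowing it makes the builtin uncallable for the rest of the scope. A small illustration:

```go
package main

import "fmt"

func main() {
	buf := make([]byte, 0, 8)

	capability := []byte{0x01} // fine: the builtin cap() stays usable
	fmt.Println(cap(buf), capability)

	// cap := []byte{0x01}
	// fmt.Println(cap(buf)) // would not compile: cap is now a slice, not a function
}
```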
vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go (generated, vendored, 7 changes)
@@ -1,6 +1,7 @@
+//go:build !linux || !cgo
 // +build !linux !cgo
 
-package copy
+package copy //nolint: predeclared
 
 import (
 	"io"
@@ -24,7 +25,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
 }
 
 // CopyRegularToFile copies the content of a file to another
-func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
 	f, err := os.Open(srcPath)
 	if err != nil {
 		return err
@@ -35,6 +36,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
 }
 
 // CopyRegular copies the content of a file to another
-func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters"
 	return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
 }
vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo
 // +build linux,cgo
 
 package devmapper
vendor/github.com/containers/storage/drivers/devmapper/driver.go (generated, vendored, 2 changes)
@@ -23,7 +23,7 @@ import (
 const defaultPerms = os.FileMode(0555)
 
 func init() {
-	graphdriver.Register("devicemapper", Init)
+	graphdriver.MustRegister("devicemapper", Init)
 }
 
 // Driver contains the device set mounted and the home directory
vendor/github.com/containers/storage/drivers/devmapper/mount.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo
 // +build linux,cgo
 
 package devmapper
vendor/github.com/containers/storage/drivers/driver.go (generated, vendored, 37 changes)
@@ -8,13 +8,12 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/sirupsen/logrus"
-	"github.com/vbatts/tar-split/tar/storage"
-
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
 	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
 )
 
 // FsMagic unsigned id of the filesystem in use.
@@ -39,7 +38,7 @@ var (
 	ErrLayerUnknown = errors.New("unknown layer")
 )
 
-//CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
 // methods.
 type CreateOpts struct {
 	MountLabel string
@@ -53,8 +52,8 @@ type MountOpts struct {
 	// Mount label is the MAC Labels to assign to mount point (SELINUX)
 	MountLabel string
 	// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
-	UidMaps []idtools.IDMap // nolint: golint
-	GidMaps []idtools.IDMap // nolint: golint
+	UidMaps []idtools.IDMap //nolint: golint,revive
+	GidMaps []idtools.IDMap //nolint: golint
 	Options []string
 
 	// Volatile specifies whether the container storage can be optimized
@@ -279,6 +278,14 @@ func init() {
 	drivers = make(map[string]InitFunc)
 }
 
+// MustRegister registers an InitFunc for the driver, or panics.
+// It is suitable for package’s init() sections.
+func MustRegister(name string, initFunc InitFunc) {
+	if err := Register(name, initFunc); err != nil {
+		panic(fmt.Sprintf("failed to register containers/storage graph driver %q: %v", name, err))
+	}
+}
+
 // Register registers an InitFunc for the driver.
 func Register(name string, initFunc InitFunc) error {
 	if _, exists := drivers[name]; exists {
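
MustRegister converts a registration error into a panic. That is reasonable here because drivers register themselves from `init()` with hard-coded names, so a duplicate is a programming error rather than a runtime condition. A minimal sketch of the pattern over a simplified registry:

```go
package main

import "fmt"

// InitFunc and the registry are simplified stand-ins for the real graphdriver types.
type InitFunc func(home string) error

var drivers = map[string]InitFunc{}

func Register(name string, initFunc InitFunc) error {
	if _, exists := drivers[name]; exists {
		return fmt.Errorf("name already registered %s", name)
	}
	drivers[name] = initFunc
	return nil
}

// MustRegister panics on failure, making init()-time misuse fail loudly.
func MustRegister(name string, initFunc InitFunc) {
	if err := Register(name, initFunc); err != nil {
		panic(fmt.Sprintf("failed to register graph driver %q: %v", name, err))
	}
}

func main() {
	MustRegister("vfs", func(home string) error { return nil })
	fmt.Println(len(drivers)) // 1
}
```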
@@ -405,3 +412,21 @@ func scanPriorDrivers(root string) map[string]bool {
 	}
 	return driversMap
 }
+
+// driverPut is driver.Put, but errors are handled either by updating mainErr or just logging.
+// Typical usage:
+//
+//	func …(…) (err error) {
+//		…
+//		defer driverPut(driver, id, &err)
+//	}
+func driverPut(driver ProtoDriver, id string, mainErr *error) {
+	if err := driver.Put(id); err != nil {
+		err = fmt.Errorf("unmounting layer %s: %w", id, err)
+		if *mainErr == nil {
+			*mainErr = err
+		} else {
+			logrus.Errorf(err.Error())
+		}
+	}
+}
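
driverPut exists so that deferred unmounts can promote their failure through a named return value instead of being silently dropped; the hunks elsewhere in this update rewrite `defer driver.Put(id)` call sites to use it. A minimal sketch of the shape, with a hypothetical `cleanup` standing in for `driver.Put`:

```go
package main

import (
	"errors"
	"fmt"
)

// cleanup is a stand-in for driver.Put.
func cleanup(fail bool) error {
	if fail {
		return errors.New("unmount failed")
	}
	return nil
}

// put mirrors driverPut: promote the cleanup error only if the body succeeded.
func put(mainErr *error, fail bool) {
	if err := cleanup(fail); err != nil {
		if *mainErr == nil {
			*mainErr = err
		} else {
			fmt.Println("already failing, just log:", err)
		}
	}
}

func doWork() (err error) {
	defer put(&err, true)
	return nil // body succeeds; the caller still sees "unmount failed"
}

func main() {
	fmt.Println(doWork())
}
```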
vendor/github.com/containers/storage/drivers/driver_solaris.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build solaris && cgo
 // +build solaris,cgo
 
 package graphdriver
vendor/github.com/containers/storage/drivers/driver_unsupported.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux && !windows && !freebsd && !solaris && !darwin
 // +build !linux,!windows,!freebsd,!solaris,!darwin
 
 package graphdriver
vendor/github.com/containers/storage/drivers/fsdiff.go (generated, vendored, 20 changes)
@@ -65,7 +65,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
 
 	defer func() {
 		if err != nil {
-			driver.Put(id)
+			driverPut(driver, id, &err)
 		}
 	}()
 
@@ -80,7 +80,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
 		}
 		return ioutils.NewReadCloserWrapper(archive, func() error {
 			err := archive.Close()
-			driver.Put(id)
+			driverPut(driver, id, &err)
 			return err
 		}), nil
 	}
@@ -90,7 +90,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
 	if err != nil {
 		return nil, err
 	}
-	defer driver.Put(parent)
+	defer driverPut(driver, parent, &err)
 
 	changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings)
 	if err != nil {
@@ -104,7 +104,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
 
 	return ioutils.NewReadCloserWrapper(archive, func() error {
 		err := archive.Close()
-		driver.Put(id)
+		driverPut(driver, id, &err)
 
 // NaiveDiffDriver compares file metadata with parent layers. Parent layers
 // are extracted from tar's with full second precision on modified time.
@@ -117,7 +117,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
 
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
-func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
+func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ []archive.Change, retErr error) {
 	driver := gdw.ProtoDriver
 
 	if idMappings == nil {
@@ -134,7 +134,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
 	if err != nil {
 		return nil, err
 	}
-	defer driver.Put(id)
+	defer driverPut(driver, id, &retErr)
 
 	parentFs := ""
 
@@ -147,7 +147,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
 		if err != nil {
 			return nil, err
 		}
-		defer driver.Put(parent)
+		defer driverPut(driver, parent, &retErr)
 	}
 
 	return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings)
@@ -171,10 +171,10 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
 	if err != nil {
 		return
 	}
-	defer driver.Put(id)
+	defer driverPut(driver, id, &err)
 
 	defaultForceMask := os.FileMode(0700)
-	var forceMask *os.FileMode = nil
+	var forceMask *os.FileMode // = nil
 	if runtime.GOOS == "darwin" {
 		forceMask = &defaultForceMask
 	}
@@ -224,7 +224,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings,
 	if err != nil {
 		return
 	}
-	defer driver.Put(id)
+	defer driverPut(driver, id, &err)
 
 	return archive.ChangesSize(layerFs, changes), nil
 }
vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go (generated, vendored, 10 changes)
@@ -6,6 +6,7 @@ package overlay
 import (
 	"fmt"
 	"os"
+	"runtime"
 	"syscall"
 	"unsafe"
 
@@ -112,7 +113,14 @@ func createIDMappedMount(source, target string, pid int) error {
 // createUsernsProcess forks the current process and creates a user namespace using the specified
 // mappings. It returns the pid of the new process.
 func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
-	pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0)
+	var pid uintptr
+	var err syscall.Errno
+
+	if runtime.GOARCH == "s390x" {
+		pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0)
+	} else {
+		pid, _, err = syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0)
+	}
 	if err != 0 {
 		return -1, nil, err
 	}
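
The s390x branch above reflects a kernel ABI quirk: on s390x the raw clone(2) syscall takes the child stack as its first argument and the flags word as its second, the reverse of most architectures. A sketch of the dispatch; `main` only reports which order would be used, since actually issuing a raw clone from a Go program needs the controlled setup the vendored code provides:

```go
//go:build linux

package main

import (
	"fmt"
	"runtime"
	"syscall"

	"golang.org/x/sys/unix"
)

// rawCloneNewUserns mirrors the vendored call: flags first on most
// architectures, second (after the stack argument) on s390x.
func rawCloneNewUserns() (uintptr, syscall.Errno) {
	var pid uintptr
	var errno syscall.Errno
	flags := unix.CLONE_NEWUSER | uintptr(unix.SIGCHLD)
	if runtime.GOARCH == "s390x" {
		pid, _, errno = syscall.Syscall6(uintptr(unix.SYS_CLONE), 0, flags, 0, 0, 0, 0)
	} else {
		pid, _, errno = syscall.Syscall6(uintptr(unix.SYS_CLONE), flags, 0, 0, 0, 0, 0)
	}
	return pid, errno
}

func main() {
	order := "flags, stack"
	if runtime.GOARCH == "s390x" {
		order = "stack, flags"
	}
	fmt.Printf("raw clone argument order on %s: %s\n", runtime.GOARCH, order)
}
```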
vendor/github.com/containers/storage/drivers/overlay/mount.go (generated, vendored, 97 changes)
@@ -8,14 +8,16 @@ import (
 	"flag"
 	"fmt"
 	"os"
+	"path/filepath"
 	"runtime"
+	"strings"
 
 	"github.com/containers/storage/pkg/reexec"
 	"golang.org/x/sys/unix"
 )
 
 func init() {
-	reexec.Register("storage-mountfrom", mountFromMain)
+	reexec.Register("storage-mountfrom", mountOverlayFromMain)
 }
 
 func fatal(err error) {
@@ -31,7 +33,7 @@ type mountOptions struct {
 	Flag uint32
 }
 
-func mountFrom(dir, device, target, mType string, flags uintptr, label string) error {
+func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label string) error {
 	options := &mountOptions{
 		Device: device,
 		Target: target,
@@ -67,7 +69,7 @@ func mountOverlayFrom(dir, device, target, mType string, flags uintptr, label st
 }
 
 // mountfromMain is the entry-point for storage-mountfrom on re-exec.
-func mountFromMain() {
+func mountOverlayFromMain() {
 	runtime.LockOSThread()
 	flag.Parse()
 
@@ -77,11 +79,96 @@ func mountOverlayFromMain() {
 		fatal(err)
 	}
 
-	if err := os.Chdir(flag.Arg(0)); err != nil {
+	// Mount the arguments passed from the specified directory. Some of the
+	// paths mentioned in the values we pass to the kernel are relative to
+	// the specified directory.
+	homedir := flag.Arg(0)
+	if err := os.Chdir(homedir); err != nil {
 		fatal(err)
 	}
 
-	if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+	pageSize := unix.Getpagesize()
+	if len(options.Label) < pageSize {
+		if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+			fatal(err)
+		}
+		os.Exit(0)
+	}
+
+	// Those arguments still took up too much space. Open the diff
+	// directories and use their descriptor numbers as lowers, using
+	// /proc/self/fd as the current directory.
+
+	// Split out the various options, since we need to manipulate the
+	// paths, but we don't want to mess with other options.
+	var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
+	for _, arg := range strings.Split(options.Label, ",") {
+		kv := strings.SplitN(arg, "=", 2)
+		switch kv[0] {
+		case "upperdir":
+			upperk = "upperdir="
+			upperv = kv[1]
+		case "workdir":
+			workk = "workdir="
+			workv = kv[1]
+		case "lowerdir":
+			lowerk = "lowerdir="
+			lowerv = kv[1]
+		case "label":
+			labelk = "label="
+			labelv = kv[1]
+		default:
+			if others == "" {
+				others = arg
+			} else {
+				others = others + "," + arg
+			}
+		}
+	}
+
+	// Make sure upperdir, workdir, and the target are absolute paths.
+	if upperv != "" && !filepath.IsAbs(upperv) {
+		upperv = filepath.Join(homedir, upperv)
+	}
+	if workv != "" && !filepath.IsAbs(workv) {
+		workv = filepath.Join(homedir, workv)
+	}
+	if !filepath.IsAbs(options.Target) {
+		options.Target = filepath.Join(homedir, options.Target)
+	}
+
+	// Get a descriptor for each lower, and use that descriptor's name as
+	// the new value for the list of lowers, because it's shorter.
+	if lowerv != "" {
+		lowers := strings.Split(lowerv, ":")
+		for i := range lowers {
+			lowerFd, err := unix.Open(lowers[i], unix.O_RDONLY, 0)
+			if err != nil {
+				fatal(err)
+			}
+			lowers[i] = fmt.Sprintf("%d", lowerFd)
+		}
+		lowerv = strings.Join(lowers, ":")
+	}
+
+	// Reconstruct the Label field.
+	options.Label = upperk + upperv + "," + workk + workv + "," + lowerk + lowerv + "," + labelk + labelv + "," + others
+	options.Label = strings.ReplaceAll(options.Label, ",,", ",")
+
+	// Okay, try this, if we managed to make the arguments fit.
+	var err error
+	if len(options.Label) < pageSize {
+		if err := os.Chdir("/proc/self/fd"); err != nil {
+			fatal(err)
+		}
+		err = unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label)
+	} else {
+		err = fmt.Errorf("cannot mount layer, mount data %q too large %d >= page size %d", options.Label, len(options.Label), pageSize)
+	}
+
+	// Clean up.
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "creating overlay mount to %s, mount_data=%q\n", options.Target, options.Label)
 		fatal(err)
 	}
 
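
The page-size checks above exist because the kernel refuses mount option strings that do not fit in a single page, which is exactly what long overlay `lowerdir` lists produce. A small sketch of the guard, assuming 500 lowers with 26-character link IDs as the overlay driver permits after this change:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// fitsInPage mirrors the guard in mountOverlayFromMain: mount(2) option
// strings must be strictly shorter than one page.
func fitsInPage(mountData string) bool {
	return len(mountData) < os.Getpagesize()
}

func main() {
	// Pretend we have 500 lower layers, each referenced as l/<26-char id>.
	lowers := make([]string, 500)
	for i := range lowers {
		lowers[i] = "l/" + strings.Repeat("x", 26)
	}
	opts := "lowerdir=" + strings.Join(lowers, ":")
	fmt.Println(len(opts), fitsInPage(opts)) // far larger than a 4 KiB page
}
```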
vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored, 62 changes)
@@ -73,21 +73,23 @@ const (
 // or root directory. Mounts are always done relative to root and
 // referencing the symbolic links in order to ensure the number of
 // lower directories can fit in a single page for making the mount
-// syscall. A hard upper limit of 128 lower layers is enforced to ensure
+// syscall. A hard upper limit of 500 lower layers is enforced to ensure
 // that mounts do not fail due to length.
 
 const (
 	linkDir   = "l"
 	lowerFile = "lower"
-	maxDepth  = 128
+	maxDepth  = 500
 
 	// idLength represents the number of random characters
 	// which can be used to create the unique link identifier
 	// for every layer. If this value is too long then the
 	// page size limit for the mount command may be exceeded.
 	// The idLength should be selected such that following equation
-	// is true (512 is a buffer for label metadata).
-	// ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
+	// is true (512 is a buffer for label metadata, 128 is the
+	// number of lowers we want to be able to use without having
+	// to use bind mounts to get all the way to the kernel limit).
+	// ((idLength + len(linkDir) + 1) * 128) <= (pageSize - 512)
 	idLength = 26
 )
 
@@ -140,8 +142,8 @@ var (
 )
 
 func init() {
-	graphdriver.Register("overlay", Init)
-	graphdriver.Register("overlay2", Init)
+	graphdriver.MustRegister("overlay", Init)
+	graphdriver.MustRegister("overlay2", Init)
 }
 
 func hasMetacopyOption(opts []string) bool {
@@ -309,9 +311,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 	if err != nil {
 		return nil, err
 	}
-	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
-		backingFs = fsName
+	fsName, ok := graphdriver.FsNames[fsMagic]
+	if !ok {
+		return nil, fmt.Errorf("filesystem type %#x reported for %s is not supported with 'overlay': %w", fsMagic, filepath.Dir(home), graphdriver.ErrIncompatibleFS)
 	}
+	backingFs = fsName
 
 	runhome := filepath.Join(options.RunRoot, filepath.Base(home))
 	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
@@ -1374,27 +1378,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		return "", errors.New("max depth exceeded")
 	}
 
-	// absLowers is the list of lowers as absolute paths, which works well with additional stores.
+	// absLowers is the list of lowers as absolute paths.
 	absLowers := []string{}
-	// relLowers is the list of lowers as paths relative to the driver's home directory.
-	relLowers := []string{}
-
-	// Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers
-	// lists that we're building. "diff" itself is the upper, so it won't be in the lists.
-	link, err := os.ReadFile(path.Join(dir, "link"))
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return "", err
-		}
-		logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link"))
-		if err := d.recreateSymlinks(); err != nil {
-			return "", fmt.Errorf("recreating the links: %w", err)
-		}
-		link, err = os.ReadFile(path.Join(dir, "link"))
-		if err != nil {
-			return "", err
-		}
-	}
+
 	diffN := 1
 	perms := defaultPerms
 	if d.options.forceMask != nil {
@@ -1408,7 +1394,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	}
 	for err == nil {
 		absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
-		relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN)))
 		diffN++
 		st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
 		if err == nil && !permsKnown {
@@ -1433,6 +1418,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 				perms = os.FileMode(st2.Mode())
 				permsKnown = true
 			}
+			l = lower
 			break
 		}
 		lower = ""
@@ -1457,12 +1443,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 			lower = newpath
 		}
 		absLowers = append(absLowers, lower)
-		relLowers = append(relLowers, l)
 		diffN = 1
 		_, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
 		for err == nil {
 			absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
-			relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN)))
 			diffN++
 			_, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
 		}
@@ -1470,7 +1454,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 
 	if len(absLowers) == 0 {
 		absLowers = append(absLowers, path.Join(dir, "empty"))
-		relLowers = append(relLowers, path.Join(id, "empty"))
 	}
 	// user namespace requires this to move a directory from lower to upper.
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
@@ -1604,28 +1587,23 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 			return nil
 		}
 	} else if len(mountData) >= pageSize {
-		// Use relative paths and mountFrom when the mount data has exceeded
-		// the page size. The mount syscall fails if the mount data cannot
-		// fit within a page and relative links make the mount data much
-		// smaller at the expense of requiring a fork exec to chroot.
+		// Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if
+		// the mount data cannot fit within a page and relative links make the mount data much
+		// smaller at the expense of requiring a fork exec to chdir().
 
 		workdir = path.Join(id, "work")
-		//FIXME: We need to figure out to get this to work with additional stores
 		if readWrite {
 			diffDir := path.Join(id, "diff")
-			opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
+			opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
 		} else {
-			opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(relLowers, ":"))
+			opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
 		}
 		if len(optsList) > 0 {
 			opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
 		}
 		mountData = label.FormatMountLabel(opts, options.MountLabel)
-		if len(mountData) >= pageSize {
-			return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize)
-		}
 		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
-			return mountFrom(d.home, source, target, mType, flags, label)
+			return mountOverlayFrom(d.home, source, target, mType, flags, label)
 		}
 		mountTarget = path.Join(id, "merged")
 	}
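
The revised comment keeps the sizing equation but decouples it from `maxDepth`. Plugging in the constants from this file, and assuming the common 4096-byte page, the inequality holds exactly at the boundary:

```go
package main

import "fmt"

func main() {
	const (
		linkDirLen = 1    // len("l")
		idLength   = 26   // from overlay.go
		pageSize   = 4096 // assumed here; the real code asks the OS
		labelSlack = 512  // buffer reserved for label metadata
	)
	// (idLength + len(linkDir) + 1) * 128 <= pageSize - 512
	lhs := (idLength + linkDirLen + 1) * 128
	rhs := pageSize - labelSlack
	fmt.Println(lhs, rhs, lhs <= rhs) // 3584 3584 true
}
```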
vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo
 // +build linux,cgo
 
 package overlay
vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && !cgo
 // +build linux,!cgo
 
 package overlay
vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go (generated, vendored, 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package overlay
vendor/github.com/containers/storage/drivers/quota/projectquota.go (generated, vendored, 56 changes)
@@ -51,6 +51,7 @@ struct fsxattr {
 */
 import "C"
 import (
+	"errors"
 	"fmt"
 	"math"
 	"os"
@@ -78,6 +79,7 @@ type Control struct {
 	backingFsBlockDev string
 	nextProjectID     uint32
 	quotas            map[string]uint32
+	basePath          string
 }
 
 // Attempt to generate a unigue projectid. Multiple directories
@@ -158,20 +160,22 @@ func NewControl(basePath string) (*Control, error) {
 		Size:   0,
 		Inodes: 0,
 	}
-	if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
-		return nil, err
-	}
 
 	q := Control{
 		backingFsBlockDev: backingFsBlockDev,
 		nextProjectID:     minProjectID + 1,
 		quotas:            make(map[string]uint32),
+		basePath:          basePath,
+	}
+
+	if err := q.setProjectQuota(minProjectID, quota); err != nil {
+		return nil, err
 	}
 
 	//
 	// get first project id to be used for next container
 	//
-	err = q.findNextProjectID(basePath)
+	err = q.findNextProjectID()
 	if err != nil {
 		return nil, err
 	}
@@ -204,11 +208,11 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
 	// set the quota limit for the container's project id
 	//
 	logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
-	return setProjectQuota(q.backingFsBlockDev, projectID, quota)
+	return q.setProjectQuota(projectID, quota)
 }
 
 // setProjectQuota - set the quota for project id on xfs block device
-func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
+func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
 	var d C.fs_disk_quota_t
 	d.d_version = C.FS_DQUOT_VERSION
 	d.d_id = C.__u32(projectID)
@@ -225,15 +229,35 @@ func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
 		d.d_ino_softlimit = d.d_ino_hardlimit
 	}
 
-	var cs = C.CString(backingFsBlockDev)
+	var cs = C.CString(q.backingFsBlockDev)
 	defer C.free(unsafe.Pointer(cs))
 
-	_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
-		uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
-		uintptr(unsafe.Pointer(&d)), 0, 0)
-	if errno != 0 {
+	runQuotactl := func() syscall.Errno {
+		_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
+			uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
+			uintptr(unsafe.Pointer(&d)), 0, 0)
+		return errno
+	}
+
+	errno := runQuotactl()
+
+	// If the backingFsBlockDev does not exist any more then try to recreate it.
+	if errors.Is(errno, unix.ENOENT) {
+		if _, err := makeBackingFsDev(q.basePath); err != nil {
+			return fmt.Errorf(
+				"failed to recreate missing backingFsBlockDev %s for projid %d: %w",
+				q.backingFsBlockDev, projectID, err,
+			)
+		}
+
+		if errno := runQuotactl(); errno != 0 {
+			return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
+				projectID, q.backingFsBlockDev, errno)
+		}
+
+	} else if errno != 0 {
 		return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
-			projectID, backingFsBlockDev, errno)
+			projectID, q.backingFsBlockDev, errno)
 	}
 
 	return nil
@@ -332,16 +356,16 @@ func setProjectID(targetPath string, projectID uint32) error {
 
 // findNextProjectID - find the next project id to be used for containers
 // by scanning driver home directory to find used project ids
-func (q *Control) findNextProjectID(home string) error {
-	files, err := os.ReadDir(home)
+func (q *Control) findNextProjectID() error {
+	files, err := os.ReadDir(q.basePath)
 	if err != nil {
-		return fmt.Errorf("read directory failed : %s", home)
+		return fmt.Errorf("read directory failed : %s", q.basePath)
 	}
 	for _, file := range files {
 		if !file.IsDir() {
 			continue
 		}
-		path := filepath.Join(home, file.Name())
+		path := filepath.Join(q.basePath, file.Name())
 		projid, err := getProjectID(path)
 		if err != nil {
 			return err
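
The quota change above retries `quotactl` once after recreating the backing device, and only when the first attempt failed with ENOENT. A minimal sketch of that recreate-and-retry shape, with the syscall and device recreation replaced by hypothetical stand-ins:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
)

var recreated bool

// runQuotactl stands in for the real quotactl wrapper: it fails with a
// not-exist error until the backing device has been recreated.
func runQuotactl() error {
	if !recreated {
		return fs.ErrNotExist // stand-in for unix.ENOENT
	}
	return nil
}

// recreateBackingDev stands in for makeBackingFsDev.
func recreateBackingDev() error {
	recreated = true
	return nil
}

func setQuota() error {
	err := runQuotactl()
	if errors.Is(err, fs.ErrNotExist) {
		if rerr := recreateBackingDev(); rerr != nil {
			return fmt.Errorf("recreating backing device: %w", rerr)
		}
		if err := runQuotactl(); err != nil {
			return fmt.Errorf("after recreation: %w", err)
		}
		return nil
	}
	return err
}

func main() {
	fmt.Println(setQuota()) // <nil>: the first call fails, the single retry succeeds
}
```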
1
vendor/github.com/containers/storage/drivers/register/register_aufs.go
generated
vendored
1
vendor/github.com/containers/storage/drivers/register/register_aufs.go
generated
vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build !exclude_graphdriver_aufs && linux
|
||||||
// +build !exclude_graphdriver_aufs,linux
|
// +build !exclude_graphdriver_aufs,linux
|
||||||
|
|
||||||
package register
|
package register
|
||||||
1
vendor/github.com/containers/storage/drivers/register/register_btrfs.go (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build !exclude_graphdriver_btrfs && linux
 // +build !exclude_graphdriver_btrfs,linux
 
 package register
1
vendor/github.com/containers/storage/drivers/register/register_devicemapper.go (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build !exclude_graphdriver_devicemapper && linux && cgo
 // +build !exclude_graphdriver_devicemapper,linux,cgo
 
 package register
1
vendor/github.com/containers/storage/drivers/register/register_overlay.go (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build !exclude_graphdriver_overlay && linux && cgo
 // +build !exclude_graphdriver_overlay,linux,cgo
 
 package register
1
vendor/github.com/containers/storage/drivers/register/register_zfs.go (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris
 // +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
 
 package register
3
vendor/github.com/containers/storage/drivers/template.go (generated, vendored)
@@ -1,9 +1,8 @@
 package graphdriver
 
 import (
-	"github.com/sirupsen/logrus"
-
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/sirupsen/logrus"
 )
 
 // TemplateDriver is just barely enough of a driver that we can implement a
1
vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package vfs // import "github.com/containers/storage/drivers/vfs"
6
vendor/github.com/containers/storage/drivers/vfs/driver.go (generated, vendored)
@@ -28,7 +28,7 @@ var (
 const defaultPerms = os.FileMode(0555)
 
 func init() {
-	graphdriver.Register("vfs", Init)
+	graphdriver.MustRegister("vfs", Init)
 }
 
 // Init returns a new VFS driver.
@@ -98,7 +98,7 @@ func (d *Driver) Status() [][2]string {
 
 // Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data.
 func (d *Driver) Metadata(id string) (map[string]string, error) {
-	return nil, nil
+	return nil, nil //nolint: nilnil
 }
 
 // Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
@@ -194,7 +194,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
 	if parent != "" {
 		parentDir, err := d.Get(parent, graphdriver.MountOpts{})
 		if err != nil {
-			return fmt.Errorf("%s: %s", parent, err)
+			return fmt.Errorf("%s: %w", parent, err)
 		}
 		if err := dirCopy(parentDir, dir); err != nil {
 			return err
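
Two patterns recur in this file. The %s-to-%w change in create is the standard errors-wrapping fix, letting callers inspect the cause with errors.Is/errors.As. The Register-to-MustRegister rename in init suggests a registration call that treats failure (most plausibly a duplicate driver name) as a programmer error rather than a silently ignored return value; the exact semantics are an assumption here, not confirmed by the diff. A common shape for such a pair:

package graphdriver

import "fmt"

// InitFunc is the constructor signature a graph driver registers.
// This sketch mirrors the registry idiom; it is not the vendored source.
type InitFunc func(home string) (interface{}, error)

var drivers = map[string]InitFunc{}

// Register adds a driver, reporting a duplicate name as an error.
func Register(name string, initFunc InitFunc) error {
	if _, exists := drivers[name]; exists {
		return fmt.Errorf("name already registered %s", name)
	}
	drivers[name] = initFunc
	return nil
}

// MustRegister panics instead, which is appropriate in package init(),
// where a duplicate registration can only be a programming bug.
func MustRegister(name string, initFunc InitFunc) {
	if err := Register(name, initFunc); err != nil {
		panic(fmt.Sprintf("failed to register %s: %v", name, err))
	}
}
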
2
vendor/github.com/containers/storage/drivers/windows/windows.go (generated, vendored)
@@ -53,7 +53,7 @@ var (
 
 // init registers the windows graph drivers to the register.
 func init() {
-	graphdriver.Register("windowsfilter", InitFilter)
+	graphdriver.MustRegister("windowsfilter", InitFilter)
 	// DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
 	// debugging issues in the re-exec codepath significantly easier.
 	if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
2
vendor/github.com/containers/storage/drivers/zfs/zfs.go (generated, vendored)
@@ -33,7 +33,7 @@ type zfsOptions struct {
 const defaultPerms = os.FileMode(0555)
 
 func init() {
-	graphdriver.Register("zfs", Init)
+	graphdriver.MustRegister("zfs", Init)
 }
 
 // Logger returns a zfs logger implementation.
9
vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go (generated, vendored)
@@ -1,11 +1,4 @@
+//go:build !linux && !freebsd
 // +build !linux,!freebsd
 
 package zfs
-
-func checkRootdirFs(rootdir string) error {
-	return nil
-}
-
-func getMountpoint(id string) string {
-	return id
-}
332
vendor/github.com/containers/storage/images.go (generated, vendored)
@@ -1,7 +1,6 @@
 package storage
 
 import (
-	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -94,11 +93,17 @@ type Image struct {
 	Flags map[string]interface{} `json:"flags,omitempty"`
 }
 
-// ROImageStore provides bookkeeping for information about Images.
-type ROImageStore interface {
-	ROFileBasedStore
-	ROMetadataStore
-	ROBigDataStore
+// roImageStore provides bookkeeping for information about Images.
+type roImageStore interface {
+	roMetadataStore
+	roBigDataStore
+
+	// startReading makes sure the store is fresh, and locks it for reading.
+	// If this succeeds, the caller MUST call stopReading().
+	startReading() error
+
+	// stopReading releases locks obtained by startReading.
+	stopReading()
 
 	// Exists checks if there is an image with the given ID or name.
 	Exists(id string) bool
@@ -106,10 +111,6 @@ type ROImageStore interface {
 	// Get retrieves information about an image given an ID or name.
 	Get(id string) (*Image, error)
 
-	// Lookup attempts to translate a name to an ID. Most methods do this
-	// implicitly.
-	Lookup(name string) (string, error)
-
 	// Images returns a slice enumerating the known images.
 	Images() ([]Image, error)
 
@@ -120,34 +121,29 @@ type ROImageStore interface {
 	ByDigest(d digest.Digest) ([]*Image, error)
 }
 
-// ImageStore provides bookkeeping for information about Images.
-type ImageStore interface {
-	ROImageStore
-	RWFileBasedStore
-	RWMetadataStore
-	RWImageBigDataStore
-	FlaggableStore
+// rwImageStore provides bookkeeping for information about Images.
+type rwImageStore interface {
+	roImageStore
+	rwMetadataStore
+	rwImageBigDataStore
+	flaggableStore
+
+	// startWriting makes sure the store is fresh, and locks it for writing.
+	// If this succeeds, the caller MUST call stopWriting().
+	startWriting() error
+
+	// stopWriting releases locks obtained by startWriting.
+	stopWriting()
 
 	// Create creates an image that has a specified ID (or a random one) and
 	// optional names, using the specified layer as its topmost (hopefully
 	// read-only) layer. That layer can be referenced by multiple images.
 	Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
 
-	// SetNames replaces the list of names associated with an image with the
-	// supplied values. The values are expected to be valid normalized
+	// updateNames modifies names associated with an image based on (op, names).
+	// The values are expected to be valid normalized
 	// named image references.
-	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
-	SetNames(id string, names []string) error
-
-	// AddNames adds the supplied values to the list of names associated with the image with
-	// the specified id. The values are expected to be valid normalized
-	// named image references.
-	AddNames(id string, names []string) error
-
-	// RemoveNames removes the supplied values from the list of names associated with the image with
-	// the specified id. The values are expected to be valid normalized
-	// named image references.
-	RemoveNames(id string, names []string) error
+	updateNames(id string, names []string, op updateNameOperation) error
 
 	// Delete removes the record of the image.
 	Delete(id string) error
@@ -157,7 +153,7 @@ type ImageStore interface {
 }
 
 type imageStore struct {
-	lockfile Locker
+	lockfile Locker // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
 	dir      string
 	images   []*Image
 	idindex  *truncindex.TruncIndex
@@ -197,6 +193,91 @@ func copyImageSlice(slice []*Image) []*Image {
 	return nil
 }
 
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of imageStore construction, every other caller
+// should use startReading() instead.
+func (r *imageStore) startWritingWithReload(canReload bool) error {
+	r.lockfile.Lock()
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			r.lockfile.Unlock()
+		}
+	}()
+
+	if canReload {
+		if err := r.reloadIfChanged(true); err != nil {
+			return err
+		}
+	}
+
+	succeeded = true
+	return nil
+}
+
+// startWriting makes sure the store is fresh, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+func (r *imageStore) startWriting() error {
+	return r.startWritingWithReload(true)
+}
+
+// stopWriting releases locks obtained by startWriting.
+func (r *imageStore) stopWriting() {
+	r.lockfile.Unlock()
+}
+
+// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+//
+// This is an internal implementation detail of imageStore construction, every other caller
+// should use startReading() instead.
+func (r *imageStore) startReadingWithReload(canReload bool) error {
+	r.lockfile.RLock()
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			r.lockfile.Unlock()
+		}
+	}()
+
+	if canReload {
+		if err := r.reloadIfChanged(false); err != nil {
+			return err
+		}
+	}
+
+	succeeded = true
+	return nil
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *imageStore) startReading() error {
+	return r.startReadingWithReload(true)
+}
+
+// stopReading releases locks obtained by startReading.
+func (r *imageStore) stopReading() {
+	r.lockfile.Unlock()
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+func (r *imageStore) reloadIfChanged(lockedForWriting bool) error {
+	r.loadMut.Lock()
+	defer r.loadMut.Unlock()
+
+	modified, err := r.lockfile.Modified()
+	if err == nil && modified {
+		return r.load(lockedForWriting)
+	}
+	return err
+}
+
 func (r *imageStore) Images() ([]Image, error) {
 	images := make([]Image, len(r.images))
 	for i := range r.images {
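
The new start/stop helpers use a small idiom worth calling out: take the lock, arm a deferred unlock that only fires if the function fails partway through, and disarm it with a succeeded flag once every fallible step is done, so a caller never receives an error together with a lock it must release. A standalone sketch of the idiom (names here are illustrative, not the vendored API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type store struct {
	mu sync.Mutex
}

// startWriting locks the store and runs refresh while locked. If refresh
// fails, the deferred func releases the lock; on success the defer is
// disarmed and the caller owns the lock until stopWriting.
func (s *store) startWriting(refresh func() error) error {
	s.mu.Lock()
	succeeded := false
	defer func() {
		if !succeeded {
			s.mu.Unlock()
		}
	}()

	if err := refresh(); err != nil {
		return err // lock is released by the deferred func
	}

	succeeded = true
	return nil
}

func (s *store) stopWriting() { s.mu.Unlock() }

func main() {
	s := &store{}
	if err := s.startWriting(func() error { return errors.New("stale data") }); err != nil {
		fmt.Println("not locked after:", err)
	}
	if err := s.startWriting(func() error { return nil }); err == nil {
		defer s.stopWriting()
		fmt.Println("locked, writing…")
	}
}
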
@@ -257,49 +338,57 @@ func (i *Image) recomputeDigests() error {
 	return nil
 }
 
-func (r *imageStore) Load() error {
+// load reloads the contents of the store from disk.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+func (r *imageStore) load(lockedForWriting bool) error {
 	shouldSave := false
 	rpath := r.imagespath()
 	data, err := os.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
 
 	images := []*Image{}
-	idlist := []string{}
+	if len(data) != 0 {
+		if err := json.Unmarshal(data, &images); err != nil {
+			return fmt.Errorf("loading %q: %w", rpath, err)
+		}
+	}
+	idlist := make([]string, 0, len(images))
 	ids := make(map[string]*Image)
 	names := make(map[string]*Image)
 	digests := make(map[digest.Digest][]*Image)
-	if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
-		idlist = make([]string, 0, len(images))
-		for n, image := range images {
-			ids[image.ID] = images[n]
-			idlist = append(idlist, image.ID)
-			for _, name := range image.Names {
-				if conflict, ok := names[name]; ok {
-					r.removeName(conflict, name)
-					shouldSave = true
-				}
-			}
-			// Compute the digest list.
-			err = image.recomputeDigests()
-			if err != nil {
-				return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
-			}
-			for _, name := range image.Names {
-				names[name] = image
-			}
-			for _, digest := range image.Digests {
-				list := digests[digest]
-				digests[digest] = append(list, image)
-			}
-			image.ReadOnly = !r.IsReadWrite()
+	for n, image := range images {
+		ids[image.ID] = images[n]
+		idlist = append(idlist, image.ID)
+		for _, name := range image.Names {
+			if conflict, ok := names[name]; ok {
+				r.removeName(conflict, name)
+				shouldSave = true
+			}
 		}
+		// Compute the digest list.
+		if err := image.recomputeDigests(); err != nil {
+			return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
+		}
+		for _, name := range image.Names {
+			names[name] = image
+		}
+		for _, digest := range image.Digests {
+			list := digests[digest]
+			digests[digest] = append(list, image)
+		}
+		image.ReadOnly = !r.lockfile.IsReadWrite()
 	}
-	if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
+	if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
+		// Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
 		return ErrDuplicateImageNames
 	}
 	r.images = images
-	r.idindex = truncindex.NewTruncIndex(idlist)
+	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
 	r.byid = ids
 	r.byname = names
 	r.bydigest = digests
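
Note the untangled unmarshal: the old combined condition treated malformed JSON the same as an empty file and silently continued with an empty store, while the new shape keeps "missing or empty file means empty store" but reports corrupt JSON. A hedged, standalone sketch of that distinction (field names here are simplified):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type image struct {
	ID    string   `json:"id"`
	Names []string `json:"names,omitempty"`
}

// loadImages mirrors the refactored shape: a missing or empty file means an
// empty store, while malformed JSON is an error instead of being silently
// treated as empty.
func loadImages(path string) ([]*image, error) {
	data, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	images := []*image{}
	if len(data) != 0 {
		if err := json.Unmarshal(data, &images); err != nil {
			return nil, fmt.Errorf("loading %q: %w", path, err)
		}
	}
	return images, nil
}

func main() {
	imgs, err := loadImages("/nonexistent/images.json")
	fmt.Println(len(imgs), err) // 0 <nil>: missing file is an empty store
}
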
@@ -309,13 +398,13 @@ func (r *imageStore) Load() error {
 	return nil
 }
 
+// Save saves the contents of the store to disk.  It should be called with
+// the lock held, locked for writing.
 func (r *imageStore) Save() error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
-	if !r.Locked() {
-		return errors.New("image store is not locked for writing")
-	}
+	r.lockfile.AssertLockedForWriting()
 	rpath := r.imagespath()
 	if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
 		return err
@@ -324,11 +413,13 @@ func (r *imageStore) Save() error {
 	if err != nil {
 		return err
 	}
-	defer r.Touch()
-	return ioutils.AtomicWriteFile(rpath, jdata, 0600)
+	if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
+		return err
+	}
+	return r.lockfile.Touch()
 }
 
-func newImageStore(dir string) (ImageStore, error) {
+func newImageStore(dir string) (rwImageStore, error) {
 	if err := os.MkdirAll(dir, 0700); err != nil {
 		return nil, err
 	}
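
The Save change also fixes an ordering bug: the lockfile "touch" (recording this process as the last writer) now happens only after the data actually reached disk, and its error is propagated instead of being discarded by a fire-and-forget defer. A sketch of the write-then-mark ordering; atomicWriteFile below is a minimal stand-in for the real ioutils.AtomicWriteFile helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWriteFile writes to a temp file in the same directory and renames
// it into place, so readers never observe a half-written file.
func atomicWriteFile(path string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func save(path string, data []byte, markWritten func() error) error {
	// Only record "we wrote" after the write succeeded; the old
	// `defer r.Touch()` marked the store as written even on failure.
	if err := atomicWriteFile(path, data, 0o600); err != nil {
		return err
	}
	return markWritten()
}

func main() {
	dir, _ := os.MkdirTemp("", "store")
	defer os.RemoveAll(dir)
	err := save(filepath.Join(dir, "images.json"), []byte("[]"), func() error {
		fmt.Println("lockfile touched")
		return nil
	})
	fmt.Println("err:", err)
}
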
@@ -336,8 +427,6 @@ func newImageStore(dir string) (ImageStore, error) {
 	if err != nil {
 		return nil, err
 	}
-	lockfile.Lock()
-	defer lockfile.Unlock()
 	istore := imageStore{
 		lockfile: lockfile,
 		dir:      dir,
@@ -346,19 +435,21 @@ func newImageStore(dir string) (ImageStore, error) {
 		byname:   make(map[string]*Image),
 		bydigest: make(map[digest.Digest][]*Image),
 	}
-	if err := istore.Load(); err != nil {
+	if err := istore.startWritingWithReload(false); err != nil {
+		return nil, err
+	}
+	defer istore.stopWriting()
+	if err := istore.load(true); err != nil {
 		return nil, err
 	}
 	return &istore, nil
 }
 
-func newROImageStore(dir string) (ROImageStore, error) {
+func newROImageStore(dir string) (roImageStore, error) {
 	lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock"))
 	if err != nil {
 		return nil, err
 	}
-	lockfile.RLock()
-	defer lockfile.Unlock()
 	istore := imageStore{
 		lockfile: lockfile,
 		dir:      dir,
@@ -367,7 +458,11 @@ func newROImageStore(dir string) (ROImageStore, error) {
 		byname:   make(map[string]*Image),
 		bydigest: make(map[digest.Digest][]*Image),
 	}
-	if err := istore.Load(); err != nil {
+	if err := istore.startReadingWithReload(false); err != nil {
+		return nil, err
+	}
+	defer istore.stopReading()
+	if err := istore.load(false); err != nil {
 		return nil, err
 	}
 	return &istore, nil
@@ -386,7 +481,7 @@ func (r *imageStore) lookup(id string) (*Image, bool) {
 }
 
 func (r *imageStore) ClearFlag(id string, flag string) error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	image, ok := r.lookup(id)
@@ -398,7 +493,7 @@ func (r *imageStore) ClearFlag(id string, flag string) error {
 }
 
 func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	image, ok := r.lookup(id)
@@ -413,7 +508,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
 }
 
 func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	if id == "" {
@@ -455,7 +550,9 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
 		return nil, fmt.Errorf("validating digests for new image: %w", err)
 	}
 	r.images = append(r.images, image)
-	r.idindex.Add(id)
+	// This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+	// Implementing recovery from an unlikely and unimportant failure here would be too risky.
+	_ = r.idindex.Add(id)
 	r.byid[id] = image
 	for _, name := range names {
 		r.byname[name] = image
@@ -498,7 +595,7 @@ func (r *imageStore) Metadata(id string) (string, error) {
 }
 
 func (r *imageStore) SetMetadata(id, metadata string) error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	if image, ok := r.lookup(id); ok {
@@ -516,21 +613,8 @@ func (i *Image) addNameToHistory(name string) {
 	i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...))
 }
 
-// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
-func (r *imageStore) SetNames(id string, names []string) error {
-	return r.updateNames(id, names, setNames)
-}
-
-func (r *imageStore) AddNames(id string, names []string) error {
-	return r.updateNames(id, names, addNames)
-}
-
-func (r *imageStore) RemoveNames(id string, names []string) error {
-	return r.updateNames(id, names, removeNames)
-}
-
 func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	image, ok := r.lookup(id)
@@ -557,7 +641,7 @@ func (r *imageStore) updateNames(id string, names []string, op updateNameOperati
 }
 
 func (r *imageStore) Delete(id string) error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	image, ok := r.lookup(id)
@@ -572,7 +656,9 @@ func (r *imageStore) Delete(id string) error {
 		}
 	}
 	delete(r.byid, id)
-	r.idindex.Delete(id)
+	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
+	_ = r.idindex.Delete(id)
 	for _, name := range image.Names {
 		delete(r.byname, name)
 	}
@@ -608,13 +694,6 @@ func (r *imageStore) Get(id string) (*Image, error) {
 	return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
 }
 
-func (r *imageStore) Lookup(name string) (id string, err error) {
-	if image, ok := r.lookup(name); ok {
-		return image.ID, nil
-	}
-	return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
-}
-
 func (r *imageStore) Exists(id string) bool {
 	_, ok := r.lookup(id)
 	return ok
@@ -698,7 +777,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
 	if key == "" {
 		return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
 	}
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	image, ok := r.lookup(id)
@@ -779,7 +858,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
 }
 
 func (r *imageStore) Wipe() error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
 	}
 	ids := make([]string, 0, len(r.byid))
@@ -793,50 +872,3 @@ func (r *imageStore) Wipe() error {
 	}
 	return nil
 }
-
-func (r *imageStore) Lock() {
-	r.lockfile.Lock()
-}
-
-func (r *imageStore) RecursiveLock() {
-	r.lockfile.RecursiveLock()
-}
-
-func (r *imageStore) RLock() {
-	r.lockfile.RLock()
-}
-
-func (r *imageStore) Unlock() {
-	r.lockfile.Unlock()
-}
-
-func (r *imageStore) Touch() error {
-	return r.lockfile.Touch()
-}
-
-func (r *imageStore) Modified() (bool, error) {
-	return r.lockfile.Modified()
-}
-
-func (r *imageStore) IsReadWrite() bool {
-	return r.lockfile.IsReadWrite()
-}
-
-func (r *imageStore) TouchedSince(when time.Time) bool {
-	return r.lockfile.TouchedSince(when)
-}
-
-func (r *imageStore) Locked() bool {
-	return r.lockfile.Locked()
-}
-
-func (r *imageStore) ReloadIfChanged() error {
-	r.loadMut.Lock()
-	defer r.loadMut.Unlock()
-
-	modified, err := r.Modified()
-	if err == nil && modified {
-		return r.Load()
-	}
-	return err
-}
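
Taken together, the images.go changes replace the exported Lock/RLock/Touch/Modified surface with two narrow entry points. Callers that used to lock, reload, and unlock by hand now follow one pattern: acquire a fresh locked view once, release it with a single defer. A hedged, standalone sketch of the calling convention the new interface implies (the types here are illustrative, not the vendored ones):

package main

import "fmt"

// imageReader captures the narrow read-side contract from the diff:
// startReading locks and refreshes, stopReading releases.
type imageReader interface {
	startReading() error
	stopReading()
	Images() ([]string, error) // simplified payload type for the sketch
}

// listImages shows the consumer-side pattern: one acquisition, one defer.
func listImages(store imageReader) ([]string, error) {
	if err := store.startReading(); err != nil {
		return nil, err
	}
	defer store.stopReading()
	return store.Images()
}

type fakeStore struct{ images []string }

func (f *fakeStore) startReading() error       { return nil }
func (f *fakeStore) stopReading()              {}
func (f *fakeStore) Images() ([]string, error) { return f.images, nil }

func main() {
	imgs, _ := listImages(&fakeStore{images: []string{"alpine", "busybox"}})
	fmt.Println(imgs)
}
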
507
vendor/github.com/containers/storage/layers.go (generated, vendored)
@@ -26,7 +26,7 @@ import (
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
-	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/sirupsen/logrus"
 	"github.com/vbatts/tar-split/archive/tar"
 	"github.com/vbatts/tar-split/tar/asm"
@@ -137,13 +137,19 @@ type DiffOptions struct {
 	Compression *archive.Compression
 }
 
-// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
+// roLayerStore wraps a graph driver, adding the ability to refer to layers by
 // name, and keeping track of parent-child relationships, along with a list of
 // all known layers.
-type ROLayerStore interface {
-	ROFileBasedStore
-	ROMetadataStore
-	ROLayerBigDataStore
+type roLayerStore interface {
+	roMetadataStore
+	roLayerBigDataStore
+
+	// startReading makes sure the store is fresh, and locks it for reading.
+	// If this succeeds, the caller MUST call stopReading().
+	startReading() error
+
+	// stopReading releases locks obtained by startReading.
+	stopReading()
 
 	// Exists checks if a layer with the specified name or ID is known.
 	Exists(id string) bool
@@ -177,10 +183,6 @@ type ROLayerStore interface {
 	// found, it returns an error.
 	Size(name string) (int64, error)
 
-	// Lookup attempts to translate a name to an ID. Most methods do this
-	// implicitly.
-	Lookup(name string) (string, error)
-
 	// LayersByCompressedDigest returns a slice of the layers with the
 	// specified compressed digest value recorded for them.
 	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
@@ -193,15 +195,21 @@ type ROLayerStore interface {
 	Layers() ([]Layer, error)
 }
 
-// LayerStore wraps a graph driver, adding the ability to refer to layers by
+// rwLayerStore wraps a graph driver, adding the ability to refer to layers by
 // name, and keeping track of parent-child relationships, along with a list of
 // all known layers.
-type LayerStore interface {
-	ROLayerStore
-	RWFileBasedStore
-	RWMetadataStore
-	FlaggableStore
-	RWLayerBigDataStore
+type rwLayerStore interface {
+	roLayerStore
+	rwMetadataStore
+	flaggableStore
+	rwLayerBigDataStore
+
+	// startWriting makes sure the store is fresh, and locks it for writing.
+	// If this succeeds, the caller MUST call stopWriting().
+	startWriting() error
+
+	// stopWriting releases locks obtained by startWriting.
+	stopWriting()
 
 	// Create creates a new layer, optionally giving it a specified ID rather than
 	// a randomly-generated one, either inheriting data from another specified
@@ -218,18 +226,8 @@ type LayerStore interface {
 	// Put combines the functions of CreateWithFlags and ApplyDiff.
 	Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
 
-	// SetNames replaces the list of names associated with a layer with the
-	// supplied values.
-	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
-	SetNames(id string, names []string) error
-
-	// AddNames adds the supplied values to the list of names associated with the layer with the
-	// specified id.
-	AddNames(id string, names []string) error
-
-	// RemoveNames remove the supplied values from the list of names associated with the layer with the
-	// specified id.
-	RemoveNames(id string, names []string) error
+	// updateNames modifies names associated with a layer based on (op, names).
+	updateNames(id string, names []string, op updateNameOperation) error
 
 	// Delete deletes a layer with the specified name or ID.
 	Delete(id string) error
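
As in images.go, the three name-mutation methods collapse into a single op-driven helper, so the add/remove/replace logic lives in one place. A hedged sketch of that shape; the enum values setNames/addNames/removeNames appear elsewhere in this diff, while everything else below is illustrative (the real code also dedupes names):

package main

import "fmt"

type updateNameOperation int

const (
	setNames updateNameOperation = iota
	addNames
	removeNames
)

// applyNameOperation folds the three old entry points into one switch,
// mirroring the updateNames(id, names, op) consolidation in the diff.
func applyNameOperation(oldNames, names []string, op updateNameOperation) ([]string, error) {
	switch op {
	case setNames:
		return append([]string{}, names...), nil // replace wholesale
	case addNames:
		return append(append([]string{}, oldNames...), names...), nil
	case removeNames:
		drop := make(map[string]bool, len(names))
		for _, n := range names {
			drop[n] = true
		}
		kept := []string{}
		for _, n := range oldNames {
			if !drop[n] {
				kept = append(kept, n)
			}
		}
		return kept, nil
	default:
		return nil, fmt.Errorf("unknown name operation %d", op)
	}
}

func main() {
	out, _ := applyNameOperation([]string{"a", "b"}, []string{"b"}, removeNames)
	fmt.Println(out) // [a]
}
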
@@ -270,10 +268,6 @@ type LayerStore interface {
 	// DifferTarget gets the location where files are stored for the layer.
 	DifferTarget(id string) (string, error)
 
-	// LoadLocked wraps Load in a locked state. This means it loads the store
-	// and cleans-up invalid layers if needed.
-	LoadLocked() error
-
 	// PutAdditionalLayer creates a layer using the diff contained in the additional layer
 	// store.
 	// This API is experimental and can be changed without bumping the major version number.
@@ -293,8 +287,6 @@ type layerStore struct {
 	bymount            map[string]*Layer
 	bycompressedsum    map[digest.Digest][]string
 	byuncompressedsum  map[digest.Digest][]string
-	uidMap             []idtools.IDMap
-	gidMap             []idtools.IDMap
 	loadMut            sync.Mutex
 	layerspathModified time.Time
 }
@@ -324,6 +316,125 @@ func copyLayer(l *Layer) *Layer {
 	}
 }
 
+// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+//
+// This is an internal implementation detail of layerStore construction, every other caller
+// should use startWriting() instead.
+func (r *layerStore) startWritingWithReload(canReload bool) error {
+	r.lockfile.Lock()
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			r.lockfile.Unlock()
+		}
+	}()
+
+	if canReload {
+		if err := r.reloadIfChanged(true); err != nil {
+			return err
+		}
+	}
+
+	succeeded = true
+	return nil
+}
+
+// startWriting makes sure the store is fresh, and locks it for writing.
+// If this succeeds, the caller MUST call stopWriting().
+func (r *layerStore) startWriting() error {
+	return r.startWritingWithReload(true)
+}
+
+// stopWriting releases locks obtained by startWriting.
+func (r *layerStore) stopWriting() {
+	r.lockfile.Unlock()
+}
+
+// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+//
+// This is an internal implementation detail of layerStore construction, every other caller
+// should use startReading() instead.
+func (r *layerStore) startReadingWithReload(canReload bool) error {
+	r.lockfile.RLock()
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			r.lockfile.Unlock()
+		}
+	}()
+
+	if canReload {
+		if err := r.reloadIfChanged(false); err != nil {
+			return err
+		}
+	}
+
+	succeeded = true
+	return nil
+}
+
+// startReading makes sure the store is fresh, and locks it for reading.
+// If this succeeds, the caller MUST call stopReading().
+func (r *layerStore) startReading() error {
+	return r.startReadingWithReload(true)
+}
+
+// stopReading releases locks obtained by startReading.
+func (r *layerStore) stopReading() {
+	r.lockfile.Unlock()
+}
+
+// Modified() checks if the most recent writer was a party other than the
+// last recorded writer. It should only be called with the lock held.
+func (r *layerStore) Modified() (bool, error) {
+	var mmodified, tmodified bool
+	lmodified, err := r.lockfile.Modified()
+	if err != nil {
+		return lmodified, err
+	}
+	if r.lockfile.IsReadWrite() {
+		r.mountsLockfile.RLock()
+		defer r.mountsLockfile.Unlock()
+		mmodified, err = r.mountsLockfile.Modified()
+		if err != nil {
+			return lmodified, err
+		}
+	}
+
+	if lmodified || mmodified {
+		return true, nil
+	}
+
+	// If the layers.json file has been modified manually, then we have to
+	// reload the storage in any case.
+	info, err := os.Stat(r.layerspath())
+	if err != nil && !os.IsNotExist(err) {
+		return false, fmt.Errorf("stat layers file: %w", err)
+	}
+	if info != nil {
+		tmodified = info.ModTime() != r.layerspathModified
+	}
+
+	return tmodified, nil
+}
+
+// reloadIfChanged reloads the contents of the store from disk if it is changed.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+func (r *layerStore) reloadIfChanged(lockedForWriting bool) error {
+	r.loadMut.Lock()
+	defer r.loadMut.Unlock()
+
+	modified, err := r.Modified()
+	if err == nil && modified {
+		return r.load(lockedForWriting)
+	}
+	return err
+}
+
 func (r *layerStore) Layers() ([]Layer, error) {
 	layers := make([]Layer, len(r.layers))
 	for i := range r.layers {
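
The layer-store Modified() merges three staleness signals: the layers lockfile, the separate mounts lockfile (only meaningful for read-write stores), and a manual-edit check on the mtime of layers.json. A compact, standalone sketch of that decision order, with the two lock checks abstracted as callbacks (the real code asks the lockfiles directly):

package main

import (
	"fmt"
	"os"
	"time"
)

// storeModified reports staleness from three sources: lock-recorded writers
// first, then a raw mtime comparison that catches manual edits to the file.
func storeModified(lockModified, mountsModified func() (bool, error),
	path string, lastSeen time.Time) (bool, error) {

	if m, err := lockModified(); err != nil || m {
		return m, err
	}
	if m, err := mountsModified(); err != nil || m {
		return m, err
	}

	// If the file has been modified manually, we have to reload anyway.
	info, err := os.Stat(path)
	if err != nil && !os.IsNotExist(err) {
		return false, fmt.Errorf("stat layers file: %w", err)
	}
	if info != nil {
		return info.ModTime() != lastSeen, nil
	}
	return false, nil
}

func main() {
	no := func() (bool, error) { return false, nil }
	m, err := storeModified(no, no, "/nonexistent/layers.json", time.Time{})
	fmt.Println(m, err) // false <nil>
}
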
@@ -340,7 +451,11 @@ func (r *layerStore) layerspath() string {
 	return filepath.Join(r.layerdir, "layers.json")
 }
 
-func (r *layerStore) Load() error {
+// load reloads the contents of the store from disk.
+//
+// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
+// if it is held for writing.
+func (r *layerStore) load(lockedForWriting bool) error {
 	shouldSave := false
 	rpath := r.layerspath()
 	info, err := os.Stat(rpath)
@@ -355,62 +470,67 @@ func (r *layerStore) Load() error {
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
 
 	layers := []*Layer{}
-	idlist := []string{}
+	if len(data) != 0 {
+		if err := json.Unmarshal(data, &layers); err != nil {
+			return fmt.Errorf("loading %q: %w", rpath, err)
+		}
+	}
+	idlist := make([]string, 0, len(layers))
 	ids := make(map[string]*Layer)
 	names := make(map[string]*Layer)
 	compressedsums := make(map[digest.Digest][]string)
 	uncompressedsums := make(map[digest.Digest][]string)
-	if r.IsReadWrite() {
-		label.ClearLabels()
+	if r.lockfile.IsReadWrite() {
+		selinux.ClearLabels()
 	}
-	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
-		idlist = make([]string, 0, len(layers))
-		for n, layer := range layers {
-			ids[layer.ID] = layers[n]
-			idlist = append(idlist, layer.ID)
-			for _, name := range layer.Names {
-				if conflict, ok := names[name]; ok {
-					r.removeName(conflict, name)
-					shouldSave = true
-				}
-				names[name] = layers[n]
-			}
-			if layer.CompressedDigest != "" {
-				compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
-			}
-			if layer.UncompressedDigest != "" {
-				uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
-			}
-			if layer.MountLabel != "" {
-				label.ReserveLabel(layer.MountLabel)
-			}
-			layer.ReadOnly = !r.IsReadWrite()
+	for n, layer := range layers {
+		ids[layer.ID] = layers[n]
+		idlist = append(idlist, layer.ID)
+		for _, name := range layer.Names {
+			if conflict, ok := names[name]; ok {
+				r.removeName(conflict, name)
+				shouldSave = true
+			}
+			names[name] = layers[n]
 		}
-		err = nil
+		if layer.CompressedDigest != "" {
+			compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
+		}
+		if layer.UncompressedDigest != "" {
+			uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
+		}
+		if layer.MountLabel != "" {
+			selinux.ReserveLabel(layer.MountLabel)
+		}
+		layer.ReadOnly = !r.lockfile.IsReadWrite()
 	}
-	if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
+	if shouldSave && (!r.lockfile.IsReadWrite() || !lockedForWriting) {
+		// Eventually, the callers should be modified to retry with a write lock if IsReadWrite && !lockedForWriting, instead.
 		return ErrDuplicateLayerNames
 	}
 	r.layers = layers
-	r.idindex = truncindex.NewTruncIndex(idlist)
+	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
 	r.byid = ids
 	r.byname = names
 	r.bycompressedsum = compressedsums
 	r.byuncompressedsum = uncompressedsums
 
 	// Load and merge information about which layers are mounted, and where.
-	if r.IsReadWrite() {
+	if r.lockfile.IsReadWrite() {
 		r.mountsLockfile.RLock()
 		defer r.mountsLockfile.Unlock()
-		if err = r.loadMounts(); err != nil {
+		if err := r.loadMounts(); err != nil {
 			return err
 		}
 
 		// Last step: as we’re writable, try to remove anything that a previous
 		// user of this storage area marked for deletion but didn't manage to
 		// actually delete.
-		if r.Locked() {
+		var incompleteDeletionErrors error // = nil
+		if lockedForWriting {
 			for _, layer := range r.layers {
 				if layer.Flags == nil {
 					layer.Flags = make(map[string]interface{})
@@ -419,24 +539,26 @@ func (r *layerStore) Load() error {
 				logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
 				err = r.deleteInternal(layer.ID)
 				if err != nil {
-					break
+					// Don't return the error immediately, because deleteInternal does not saveLayers();
+					// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
+					// deleted incomplete layers have their metadata correctly removed.
+					incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+						fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
 				}
 				shouldSave = true
 			}
 		}
 		if shouldSave {
-			return r.saveLayers()
+			if err := r.saveLayers(); err != nil {
+				return err
+			}
+		}
+		if incompleteDeletionErrors != nil {
+			return incompleteDeletionErrors
 		}
 	}
-
-	return err
-}
-
-func (r *layerStore) LoadLocked() error {
-	r.lockfile.Lock()
-	defer r.lockfile.Unlock()
-	return r.Load()
+	return nil
 }
 
 func (r *layerStore) loadMounts() error {
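
Instead of breaking out of the cleanup loop on the first failed deletion, the loop now accumulates failures with hashicorp/go-multierror and still saves metadata for the deletions that did succeed. A standalone sketch of the accumulate-then-report pattern (building it requires the go-multierror module):

package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

// cleanupAll tries every item instead of stopping at the first failure,
// then reports all failures at once; partial progress is kept.
func cleanupAll(items []string, remove func(string) error) error {
	var errs error
	for _, it := range items {
		if err := remove(it); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("deleting %q: %w", it, err))
		}
	}
	return errs // nil if every removal succeeded
}

func main() {
	err := cleanupAll([]string{"a", "b", "c"}, func(s string) error {
		if s == "b" {
			return fmt.Errorf("busy")
		}
		return nil
	})
	fmt.Println(err) // reports the single failure for "b"; "a" and "c" were still removed
}
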
@@ -476,10 +598,11 @@ func (r *layerStore) loadMounts() error {
 	return err
 }
 
+// Save saves the contents of the store to disk.  It should be called with
+// the lock held, locked for writing.
 func (r *layerStore) Save() error {
 	r.mountsLockfile.Lock()
 	defer r.mountsLockfile.Unlock()
-	defer r.mountsLockfile.Touch()
 	if err := r.saveLayers(); err != nil {
 		return err
 	}
@@ -487,12 +610,10 @@ func (r *layerStore) Save() error {
 }
 
 func (r *layerStore) saveLayers() error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 	}
-	if !r.Locked() {
-		return errors.New("layer store is not locked for writing")
-	}
+	r.lockfile.AssertLockedForWriting()
 	rpath := r.layerspath()
 	if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
 		return err
@@ -501,17 +622,17 @@ func (r *layerStore) saveLayers() error {
 	if err != nil {
 		return err
 	}
-	defer r.Touch()
-	return ioutils.AtomicWriteFile(rpath, jldata, 0600)
+	if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
+		return err
+	}
+	return r.lockfile.Touch()
 }
 
 func (r *layerStore) saveMounts() error {
-	if !r.IsReadWrite() {
+	if !r.lockfile.IsReadWrite() {
 		return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 	}
-	if !r.mountsLockfile.Locked() {
-		return errors.New("layer store mount information is not locked for writing")
-	}
+	r.mountsLockfile.AssertLockedForWriting()
 	mpath := r.mountspath()
 	if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
 		return err
@@ -533,10 +654,13 @@ func (r *layerStore) saveMounts() error {
 	if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
 		return err
 	}
+	if err := r.mountsLockfile.Touch(); err != nil {
+		return err
+	}
 	return r.loadMounts()
 }
 
-func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) {
+func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (rwLayerStore, error) {
 	if err := os.MkdirAll(rundir, 0700); err != nil {
 		return nil, err
 	}
@@ -560,18 +684,18 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
 		byid:    make(map[string]*Layer),
 		bymount: make(map[string]*Layer),
 		byname:  make(map[string]*Layer),
-		uidMap:  copyIDMap(s.uidMap),
-		gidMap:  copyIDMap(s.gidMap),
 	}
-	rlstore.Lock()
-	defer rlstore.Unlock()
-	if err := rlstore.Load(); err != nil {
+	if err := rlstore.startWritingWithReload(false); err != nil {
+		return nil, err
+	}
+	defer rlstore.stopWriting()
+	if err := rlstore.load(true); err != nil {
 		return nil, err
 	}
 	return &rlstore, nil
 }
 
-func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
+func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roLayerStore, error) {
 	lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
 	if err != nil {
 		return nil, err
@ -586,9 +710,11 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
|
|||||||
bymount: make(map[string]*Layer),
|
bymount: make(map[string]*Layer),
|
||||||
byname: make(map[string]*Layer),
|
byname: make(map[string]*Layer),
|
||||||
}
|
}
|
||||||
rlstore.RLock()
|
if err := rlstore.startReadingWithReload(false); err != nil {
|
||||||
defer rlstore.Unlock()
|
return nil, err
|
||||||
if err := rlstore.Load(); err != nil {
|
}
|
||||||
|
defer rlstore.stopReading()
|
||||||
|
if err := rlstore.load(false); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &rlstore, nil
|
return &rlstore, nil
|
||||||
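Both constructors now go through paired begin/end helpers instead of raw Lock/Unlock plus Load, so the reload step cannot be forgotten by a caller. A sketch of the general shape of such helpers, under the assumption that they bundle locking with a reload of stale in-memory state (the names below are invented stand-ins, not the module's API):

    package layerstore

    import "sync"

    // store is a toy stand-in for layerStore: a lock plus an in-memory view
    // that may need reloading after another process wrote to disk.
    type store struct {
    	mu sync.RWMutex
    }

    func (s *store) reloadIfChanged(lockedForWriting bool) error {
    	// Re-read the on-disk state here if its modification marker moved.
    	return nil
    }

    // startWriting locks the store and brings the in-memory state up to date;
    // on any failure the lock is released before returning, so callers can
    // simply `return err` without leaking the lock.
    func (s *store) startWriting() error {
    	s.mu.Lock()
    	if err := s.reloadIfChanged(true); err != nil {
    		s.mu.Unlock()
    		return err
    	}
    	return nil
    }

    func (s *store) stopWriting() {
    	s.mu.Unlock()
    }

Callers then follow the pattern visible in the diff: check the error from the start helper, and defer the stop helper.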
@@ -621,7 +747,7 @@ func (r *layerStore) Size(name string) (int64, error) {
 }
 
 func (r *layerStore) ClearFlag(id string, flag string) error {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 layer, ok := r.lookup(id)
@@ -633,7 +759,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error {
 }
 
 func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return fmt.Errorf("not allowed to set flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 layer, ok := r.lookup(id)
@@ -685,7 +811,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
 
 // TODO: check if necessary fields are filled
 r.layers = append(r.layers, layer)
-r.idindex.Add(id)
+// This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+// Implementing recovery from an unlikely and unimportant failure here would be too risky.
+_ = r.idindex.Add(id)
 r.byid[id] = layer
 for _, name := range names { // names got from the additional layer store won't be used
 r.byname[name] = layer
@@ -697,14 +825,16 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
 r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
 }
 if err := r.Save(); err != nil {
-r.driver.Remove(id)
+if err2 := r.driver.Remove(id); err2 != nil {
+logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2)
+}
 return nil, err
 }
 return copyLayer(layer), nil
 }
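A recurring pattern in this commit: when recovering from a primary failure, secondary cleanup errors are now logged rather than silently discarded, and they never mask the original cause. As a compact, hedged sketch (names are placeholders):

    package layerstore

    // saveWithCleanup returns the primary error unchanged; a failure of the
    // best-effort cleanup is logged but never masks the original cause.
    func saveWithCleanup(save, cleanup func() error, logf func(string, ...interface{})) error {
    	err := save()
    	if err == nil {
    		return nil
    	}
    	if err2 := cleanup(); err2 != nil {
    		logf("while recovering from a failure to save: %v", err2)
    	}
    	return err
    }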
 
 func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 if err := os.MkdirAll(r.rundir, 0700); err != nil {
@@ -770,7 +900,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 parentMappings = &idtools.IDMappings{}
 }
 if mountLabel != "" {
-label.ReserveLabel(mountLabel)
+selinux.ReserveLabel(mountLabel)
 }
 
 // Before actually creating the layer, make a persistent record of it with incompleteFlag,
@@ -795,7 +925,9 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 BigDataNames: []string{},
 }
 r.layers = append(r.layers, layer)
+// This can only fail on duplicate IDs, which shouldn’t happen — and in that case the index is already in the desired state anyway.
-r.idindex.Add(id)
+// This is on various paths to recover from failures, so this should be robust against partially missing data.
+_ = r.idindex.Add(id)
 r.byid[id] = layer
 for _, name := range names {
 r.byname[name] = layer
@@ -907,7 +1039,7 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
 }
 
 func (r *layerStore) Mounted(id string) (int, error) {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
 }
 r.mountsLockfile.RLock()
@@ -937,7 +1069,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
 
 // You are not allowed to mount layers from readonly stores if they
 // are not mounted read/only.
-if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
+if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
 return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
 }
 r.mountsLockfile.Lock()
@@ -947,7 +1079,6 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
 return "", err
 }
 }
-defer r.mountsLockfile.Touch()
 layer, ok := r.lookup(id)
 if !ok {
 return "", ErrLayerUnknown
@@ -988,7 +1119,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
 }
 
 func (r *layerStore) Unmount(id string, force bool) (bool, error) {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
 }
 r.mountsLockfile.Lock()
@@ -998,7 +1129,6 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
 return false, err
 }
 }
-defer r.mountsLockfile.Touch()
 layer, ok := r.lookup(id)
 if !ok {
 layerByMount, ok := r.bymount[filepath.Clean(id)]
@@ -1027,7 +1157,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
 }
 
 func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
 }
 r.mountsLockfile.RLock()
@@ -1101,21 +1231,8 @@ func (r *layerStore) removeName(layer *Layer, name string) {
 layer.Names = stringSliceWithoutValue(layer.Names, name)
 }
 
-// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
-func (r *layerStore) SetNames(id string, names []string) error {
-return r.updateNames(id, names, setNames)
-}
-
-func (r *layerStore) AddNames(id string, names []string) error {
-return r.updateNames(id, names, addNames)
-}
-
-func (r *layerStore) RemoveNames(id string, names []string) error {
-return r.updateNames(id, names, removeNames)
-}
-
 func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 layer, ok := r.lookup(id)
@@ -1163,7 +1280,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
 if key == "" {
 return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
 }
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 layer, ok := r.lookup(id)
@@ -1222,7 +1339,7 @@ func (r *layerStore) Metadata(id string) (string, error) {
 }
 
 func (r *layerStore) SetMetadata(id, metadata string) error {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 if layer, ok := r.lookup(id); ok {
@@ -1248,7 +1365,7 @@ func layerHasIncompleteFlag(layer *Layer) bool {
 }
 
 func (r *layerStore) deleteInternal(id string) error {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 layer, ok := r.lookup(id)
@@ -1268,8 +1385,7 @@ func (r *layerStore) deleteInternal(id string) error {
 // We never unset incompleteFlag; below, we remove the entire object from r.layers.
 
 id = layer.ID
-err := r.driver.Remove(id)
-if err != nil {
+if err := r.driver.Remove(id); err != nil {
 return err
 }
 
@@ -1279,7 +1395,9 @@ func (r *layerStore) deleteInternal(id string) error {
 for _, name := range layer.Names {
 delete(r.byname, name)
 }
-r.idindex.Delete(id)
+// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
+// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
+_ = r.idindex.Delete(id)
 mountLabel := layer.MountLabel
 if layer.MountPoint != "" {
 delete(r.bymount, layer.MountPoint)
@@ -1309,7 +1427,7 @@ func (r *layerStore) deleteInternal(id string) error {
 }
 }
 if !found {
-label.ReleaseLabel(mountLabel)
+selinux.ReleaseLabel(mountLabel)
 }
 }
 
@@ -1365,13 +1483,6 @@ func (r *layerStore) Delete(id string) error {
 return r.Save()
 }
 
-func (r *layerStore) Lookup(name string) (id string, err error) {
-if layer, ok := r.lookup(name); ok {
-return layer.ID, nil
-}
-return "", ErrLayerUnknown
-}
-
 func (r *layerStore) Exists(id string) bool {
 _, ok := r.lookup(id)
 return ok
@@ -1385,7 +1496,7 @@ func (r *layerStore) Get(id string) (*Layer, error) {
 }
 
 func (r *layerStore) Wipe() error {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 ids := make([]string, 0, len(r.byid))
@@ -1472,6 +1583,24 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
 }, nil
 }
 
+// writeCompressedData copies data from source to compressor, which is on top of pwriter.
+func writeCompressedData(compressor io.WriteCloser, source io.ReadCloser) error {
+defer compressor.Close()
+defer source.Close()
+_, err := io.Copy(compressor, source)
+return err
+}
+
+// writeCompressedDataGoroutine copies data from source to compressor, which is on top of pwriter.
+// All errors must be reported by updating pwriter.
+func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteCloser, source io.ReadCloser) {
+err := errors.New("internal error: unexpected panic in writeCompressedDataGoroutine")
+defer func() { // Note that this is not the same as {defer pwriter.CloseWithError(err)}; we need err to be evaluated lazily.
+_ = pwriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+}()
+err = writeCompressedData(compressor, source)
+}
+
 func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
 var metadata storage.Unpacker
 
@@ -1503,12 +1632,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
 preader.Close()
 return nil, err
 }
-go func() {
-defer pwriter.Close()
-defer compressor.Close()
-defer rc.Close()
-io.Copy(compressor, rc)
-}()
+go writeCompressedDataGoroutine(pwriter, compressor, rc)
 return preader, nil
 }
 
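The new writeCompressedDataGoroutine replaces an inline goroutine in Diff whose io.Copy error was silently dropped; now every outcome, including an unexpected panic, reaches the reader through pwriter.CloseWithError, and the deferred closure reads err lazily so it reports the final value rather than the initial one. A self-contained sketch of the same pipe-plus-compressor pattern (gzip stands in for whichever compressor Diff actually selects):

    package main

    import (
    	"compress/gzip"
    	"errors"
    	"fmt"
    	"io"
    	"strings"
    )

    // copyThroughGzip mirrors the Diff change: the writing side runs in a
    // goroutine, and any error (or panic) surfaces to the reader via the pipe.
    func copyThroughGzip(source io.Reader) io.ReadCloser {
    	preader, pwriter := io.Pipe()
    	go func() {
    		// err must be read lazily inside the deferred closure; a plain
    		// `defer pwriter.CloseWithError(err)` would capture the initial
    		// value and report failure even when io.Copy succeeded.
    		err := errors.New("internal error: unexpected panic")
    		defer func() { _ = pwriter.CloseWithError(err) }()
    		compressor := gzip.NewWriter(pwriter)
    		_, err = io.Copy(compressor, source)
    		if err == nil {
    			err = compressor.Close() // flush; CloseWithError(nil) == Close()
    		}
    	}()
    	return preader
    }

    func main() {
    	rc := copyThroughGzip(strings.NewReader("hello"))
    	n, err := io.Copy(io.Discard, rc)
    	fmt.Println(n, err) // compressed byte count, <nil>
    }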
@@ -1637,7 +1761,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
 }
 
 func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) {
-if !r.IsReadWrite() {
+if !r.lockfile.IsReadWrite() {
 return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerspath(), ErrStoreIsReadOnly)
 }
 
@@ -1724,13 +1848,11 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
 return -1, err
 }
 compressor.Close()
-if err == nil {
-if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
-return -1, err
-}
-if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
-return -1, err
-}
+if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
+return -1, err
+}
+if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
+return -1, err
 }
 if compressedDigester != nil {
 compressedDigest = compressedDigester.Digest()
@@ -1825,7 +1947,9 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
 }
 for k, v := range diffOutput.BigData {
 if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {
-r.Delete(id)
+if err2 := r.Delete(id); err2 != nil {
+logrus.Errorf("While recovering from a failure to set big data, error deleting layer %#v: %v", id, err2)
+}
 return err
 }
 }
@@ -1891,81 +2015,6 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
 return r.layersByDigestMap(r.byuncompressedsum, d)
 }
 
-func (r *layerStore) Lock() {
-r.lockfile.Lock()
-}
-
-func (r *layerStore) RecursiveLock() {
-r.lockfile.RecursiveLock()
-}
-
-func (r *layerStore) RLock() {
-r.lockfile.RLock()
-}
-
-func (r *layerStore) Unlock() {
-r.lockfile.Unlock()
-}
-
-func (r *layerStore) Touch() error {
-return r.lockfile.Touch()
-}
-
-func (r *layerStore) Modified() (bool, error) {
-var mmodified, tmodified bool
-lmodified, err := r.lockfile.Modified()
-if err != nil {
-return lmodified, err
-}
-if r.IsReadWrite() {
-r.mountsLockfile.RLock()
-defer r.mountsLockfile.Unlock()
-mmodified, err = r.mountsLockfile.Modified()
-if err != nil {
-return lmodified, err
-}
-}
-
-if lmodified || mmodified {
-return true, nil
-}
-
-// If the layers.json file has been modified manually, then we have to
-// reload the storage in any case.
-info, err := os.Stat(r.layerspath())
-if err != nil && !os.IsNotExist(err) {
-return false, fmt.Errorf("stat layers file: %w", err)
-}
-if info != nil {
-tmodified = info.ModTime() != r.layerspathModified
-}
-
-return tmodified, nil
-}
-
-func (r *layerStore) IsReadWrite() bool {
-return r.lockfile.IsReadWrite()
-}
-
-func (r *layerStore) TouchedSince(when time.Time) bool {
-return r.lockfile.TouchedSince(when)
-}
-
-func (r *layerStore) Locked() bool {
-return r.lockfile.Locked()
-}
-
-func (r *layerStore) ReloadIfChanged() error {
-r.loadMut.Lock()
-defer r.loadMut.Unlock()
-
-modified, err := r.Modified()
-if err == nil && modified {
-return r.Load()
-}
-return err
-}
-
 func closeAll(closes ...func() error) (rErr error) {
 for _, f := range closes {
 if err := f(); err != nil {
vendor/github.com/containers/storage/pkg/archive/archive.go (generated, vendored; 27 changes)
@@ -527,6 +527,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 if err := ReadUserXattrToTarHeader(path, hdr); err != nil {
 return err
 }
+if err := ReadFileFlagsToTarHeader(path, hdr); err != nil {
+return err
+}
 if ta.CopyPass {
 copyPassHeader(hdr)
 }
@@ -770,6 +773,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 
 }
 
+// We defer setting flags on directories until the end of
+// Unpack or UnpackLayer in case setting them makes the
+// directory immutable.
+if hdr.Typeflag != tar.TypeDir {
+if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+return err
+}
+}
+
 if len(errs) > 0 {
 logrus.WithFields(logrus.Fields{
 "errors": errs,
@@ -864,7 +876,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 rebaseName := options.RebaseNames[include]
 
 walkRoot := getWalkRoot(srcPath, include)
-filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
+if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
 if err != nil {
 logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
 return nil
@@ -874,7 +886,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
 // Error getting relative path OR we are looking
 // at the source directory path. Skip in both situations.
-return nil
+return nil //nolint: nilerr
 }
 
 if options.IncludeSourceDir && include == "." && relFilePath != "." {
@@ -891,8 +903,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 if include != relFilePath {
 matches, err := pm.IsMatch(relFilePath)
 if err != nil {
-logrus.Errorf("Matching %s: %v", relFilePath, err)
-return err
+return fmt.Errorf("matching %s: %w", relFilePath, err)
 }
 skip = matches
 }
@@ -955,7 +966,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 }
 }
 return nil
-})
+}); err != nil {
+logrus.Errorf("%s", err)
+return
+}
 }
 }()
 
@@ -1099,6 +1113,9 @@ loop:
 if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
 return err
 }
+if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+return err
+}
 }
 return nil
 }
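The TarWithOptions change stops ignoring the value returned by filepath.WalkDir: an error returned from the callback (which now includes pattern-matching failures) aborts the walk and is logged instead of vanishing. A compilable sketch of the same shape:

    package main

    import (
    	"fmt"
    	"io/fs"
    	"log"
    	"path/filepath"
    )

    func main() {
    	if err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
    		if err != nil {
    			// Stat errors on individual entries are logged and skipped,
    			// matching the tar writer's behavior.
    			log.Printf("walk %s: %v", path, err)
    			return nil //nolint: nilerr
    		}
    		if d.Type()&fs.ModeSymlink != 0 {
    			return fmt.Errorf("refusing symlink %s", path) // aborts the walk
    		}
    		return nil
    	}); err != nil {
    		log.Printf("%s", err) // previously this error was silently lost
    	}
    }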
vendor/github.com/containers/storage/pkg/archive/archive_110.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build go1.10
 // +build go1.10
 
 package archive
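Many files in this commit gain a //go:build line ahead of the legacy // +build comment. Go 1.17 introduced the new constraint syntax, and gofmt adds it automatically and keeps the two lines in sync; carrying both lets older toolchains keep honoring the constraint. The two spellings express the same condition, for example:

    //go:build freebsd || darwin
    // +build freebsd darwin

    package archive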
vendor/github.com/containers/storage/pkg/archive/archive_19.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !go1.10
 // +build !go1.10
 
 package archive
vendor/github.com/containers/storage/pkg/archive/archive_bsd.go (generated, vendored; new file; 19 changes)
@@ -0,0 +1,19 @@
+//go:build freebsd || darwin
+// +build freebsd darwin
+
+package archive
+
+import (
+"archive/tar"
+"os"
+
+"golang.org/x/sys/unix"
+)
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+permissionsMask := hdrInfo.Mode()
+if forceMask != nil {
+permissionsMask = *forceMask
+}
+return unix.Fchmodat(unix.AT_FDCWD, path, uint32(permissionsMask), unix.AT_SYMLINK_NOFOLLOW)
+}
vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go (generated, vendored; 129 changes)
@@ -1,129 +0,0 @@
-//go:build freebsd
-// +build freebsd
-
-package archive
-
-import (
-"archive/tar"
-"errors"
-"os"
-"path/filepath"
-"syscall"
-"unsafe"
-
-"github.com/containers/storage/pkg/idtools"
-"github.com/containers/storage/pkg/system"
-"github.com/containers/storage/pkg/unshare"
-"golang.org/x/sys/unix"
-)
-
-// fixVolumePathPrefix does platform specific processing to ensure that if
-// the path being passed in is not in a volume path format, convert it to one.
-func fixVolumePathPrefix(srcPath string) string {
-return srcPath
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific. On Linux, we
-// can't use filepath.Join(srcPath,include) because this will clean away
-// a trailing "." or "/" which may be important.
-func getWalkRoot(srcPath string, include string) string {
-return srcPath + string(filepath.Separator) + include
-}
-
-// CanonicalTarNameForPath returns platform-specific filepath
-// to canonical posix-style path for tar archival. p is relative
-// path.
-func CanonicalTarNameForPath(p string) (string, error) {
-return p, nil // already unix-style
-}
-
-// chmodTarEntry is used to adjust the file permissions used in tar header based
-// on the platform the archival is done.
-func chmodTarEntry(perm os.FileMode) os.FileMode {
-return perm // noop for unix as golang APIs provide perm bits correctly
-}
-
-func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
-s, ok := stat.(*syscall.Stat_t)
-
-if ok {
-// Currently go does not fill in the major/minors
-if s.Mode&unix.S_IFBLK != 0 ||
-s.Mode&unix.S_IFCHR != 0 {
-hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
-hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
-}
-}
-
-return
-}
-
-func getInodeFromStat(stat interface{}) (inode uint64, err error) {
-s, ok := stat.(*syscall.Stat_t)
-
-if ok {
-inode = s.Ino
-}
-
-return
-}
-
-func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
-s, ok := stat.(*syscall.Stat_t)
-
-if !ok {
-return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
-}
-return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
-}
-
-func major(device uint64) uint64 {
-return (device >> 8) & 0xfff
-}
-
-func minor(device uint64) uint64 {
-return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
-if unshare.IsRootless() {
-// cannot create a device if running in user namespace
-return nil
-}
-
-mode := uint32(hdr.Mode & 07777)
-switch hdr.Typeflag {
-case tar.TypeBlock:
-mode |= unix.S_IFBLK
-case tar.TypeChar:
-mode |= unix.S_IFCHR
-case tar.TypeFifo:
-mode |= unix.S_IFIFO
-}
-
-return system.Mknod(path, mode, uint64(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
-permissionsMask := hdrInfo.Mode()
-if forceMask != nil {
-permissionsMask = *forceMask
-}
-p, err := unix.BytePtrFromString(path)
-if err != nil {
-return err
-}
-_, _, e1 := unix.Syscall(unix.SYS_LCHMOD, uintptr(unsafe.Pointer(p)), uintptr(permissionsMask), 0)
-if e1 != 0 {
-return e1
-}
-return nil
-}
-
-// Hardlink without following symlinks
-func handleLLink(targetPath string, path string) error {
-return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
-}
vendor/github.com/containers/storage/pkg/archive/archive_linux.go (generated, vendored; 19 changes)
@@ -189,3 +189,22 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) {
 }
 return 0, 0, uint32(f.Mode()), nil
 }
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+permissionsMask := hdrInfo.Mode()
+if forceMask != nil {
+permissionsMask = *forceMask
+}
+if hdr.Typeflag == tar.TypeLink {
+if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+if err := os.Chmod(path, permissionsMask); err != nil {
+return err
+}
+}
+} else if hdr.Typeflag != tar.TypeSymlink {
+if err := os.Chmod(path, permissionsMask); err != nil {
+return err
+}
+}
+return nil
+}
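The Linux handleLChmod above skips symlink entries because os.Chmod follows symlinks, so chmodding a symlink path would alter its target instead. A small demonstration of that property (self-contained; writes only to a temp dir):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	dir, _ := os.MkdirTemp("", "chmod-demo")
    	defer os.RemoveAll(dir)

    	target := filepath.Join(dir, "target")
    	link := filepath.Join(dir, "link")
    	_ = os.WriteFile(target, []byte("x"), 0o644)
    	_ = os.Symlink(target, link)

    	// Chmod on the symlink path changes the *target's* mode;
    	// that is why handleLChmod never calls it for tar.TypeSymlink.
    	_ = os.Chmod(link, 0o600)

    	fi, _ := os.Stat(target)
    	fmt.Println(fi.Mode().Perm()) // -rw-------: the target changed
    }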
vendor/github.com/containers/storage/pkg/archive/archive_other.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package archive
vendor/github.com/containers/storage/pkg/archive/archive_unix.go (generated, vendored; 27 changes)
@@ -1,5 +1,5 @@
-//go:build !windows && !freebsd
-// +build !windows,!freebsd
+//go:build !windows
+// +build !windows
 
 package archive
 
@@ -50,8 +50,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
 // Currently go does not fill in the major/minors
 if s.Mode&unix.S_IFBLK != 0 ||
 s.Mode&unix.S_IFCHR != 0 {
-hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
-hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
+hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert
+hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert
 }
 }
 
@@ -101,25 +101,6 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
 return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor))
 }
 
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
-permissionsMask := hdrInfo.Mode()
-if forceMask != nil {
-permissionsMask = *forceMask
-}
-if hdr.Typeflag == tar.TypeLink {
-if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
-if err := os.Chmod(path, permissionsMask); err != nil {
-return err
-}
-}
-} else if hdr.Typeflag != tar.TypeSymlink {
-if err := os.Chmod(path, permissionsMask); err != nil {
-return err
-}
-}
-return nil
-}
-
 // Hardlink without symlinks
 func handleLLink(targetPath, path string) error {
 // Note: on Linux, the link syscall will not follow symlinks.
vendor/github.com/containers/storage/pkg/archive/changes.go (generated, vendored; 2 changes)
@@ -56,7 +56,7 @@ func (change *Change) String() string {
 return fmt.Sprintf("%s %s", change.Kind, change.Path)
 }
 
-// for sort.Sort
+// changesByPath implements sort.Interface.
 type changesByPath []Change
 
 func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
vendor/github.com/containers/storage/pkg/archive/changes_unix.go (generated, vendored; 2 changes)
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package archive
@@ -29,6 +30,7 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta
 if oldStat.Mode() != newStat.Mode() ||
 ownerChanged ||
 oldStat.Rdev() != newStat.Rdev() ||
+oldStat.Flags() != newStat.Flags() ||
 // Don't look at size for dirs, its not a good measure of change
 (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
 (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
vendor/github.com/containers/storage/pkg/archive/copy_unix.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package archive
vendor/github.com/containers/storage/pkg/archive/diff.go (generated, vendored; 21 changes)
@@ -145,6 +145,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 return nil
 }
 if _, exists := unpackedPaths[path]; !exists {
+if err := resetImmutable(path, nil); err != nil {
+return err
+}
 err := os.RemoveAll(path)
 return err
 }
@@ -156,6 +159,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 } else {
 originalBase := base[len(WhiteoutPrefix):]
 originalPath := filepath.Join(dir, originalBase)
+if err := resetImmutable(originalPath, nil); err != nil {
+return 0, err
+}
 if err := os.RemoveAll(originalPath); err != nil {
 return 0, err
 }
@@ -165,7 +171,15 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 // The only exception is when it is a directory *and* the file from
 // the layer is also a directory. Then we want to merge them (i.e.
 // just apply the metadata from the layer).
+//
+// We always reset the immutable flag (if present) to allow metadata
+// changes and to allow directory modification. The flag will be
+// re-applied based on the contents of hdr either at the end for
+// directories or in createTarFile otherwise.
 if fi, err := os.Lstat(path); err == nil {
+if err := resetImmutable(path, &fi); err != nil {
+return 0, err
+}
 if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
 if err := os.RemoveAll(path); err != nil {
 return 0, err
@@ -215,6 +229,9 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
 return 0, err
 }
+if err := WriteFileFlagsFromTarHeader(path, hdr); err != nil {
+return 0, err
+}
 }
 
 return size, nil
@@ -245,7 +262,9 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp
 if err != nil {
 return 0, err
 }
-defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+defer func() {
+_, _ = system.Umask(oldmask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above.
+}()
 
 if decompress {
 layer, err = DecompressStream(layer)
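UnpackLayer now calls resetImmutable before every removal or overwrite: on FreeBSD, a file carrying the schg/uchg flag cannot be unlinked, even by root, until the flag is cleared, so extracting a layer that replaces such a file would otherwise fail. A hedged sketch of the clear-then-remove ordering, assuming golang.org/x/sys/unix exposes the FreeBSD Lchflags wrapper and flag constants:

    //go:build freebsd
    // +build freebsd

    package main

    import (
    	"os"

    	"golang.org/x/sys/unix"
    )

    // removeEvenIfImmutable mirrors the resetImmutable + RemoveAll ordering:
    // clear the immutable bits first, then delete.
    func removeEvenIfImmutable(path string) error {
    	var st unix.Stat_t
    	if err := unix.Lstat(path, &st); err != nil {
    		if os.IsNotExist(err) {
    			return nil
    		}
    		return err
    	}
    	if st.Flags&(unix.SF_IMMUTABLE|unix.UF_IMMUTABLE) != 0 {
    		flags := st.Flags &^ (unix.SF_IMMUTABLE | unix.UF_IMMUTABLE)
    		if err := unix.Lchflags(path, int(flags)); err != nil {
    			return err
    		}
    	}
    	return os.RemoveAll(path)
    }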
vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go (generated, vendored; new file; 167 changes)
@@ -0,0 +1,167 @@
+//go:build freebsd
+// +build freebsd
+
+package archive
+
+import (
+"archive/tar"
+"fmt"
+"math/bits"
+"os"
+"strings"
+"syscall"
+
+"github.com/containers/storage/pkg/system"
+)
+
+const (
+paxSCHILYFflags = "SCHILY.fflags"
+)
+
+var (
+flagNameToValue = map[string]uint32{
+"sappnd": system.SF_APPEND,
+"sappend": system.SF_APPEND,
+"arch": system.SF_ARCHIVED,
+"archived": system.SF_ARCHIVED,
+"schg": system.SF_IMMUTABLE,
+"schange": system.SF_IMMUTABLE,
+"simmutable": system.SF_IMMUTABLE,
+"sunlnk": system.SF_NOUNLINK,
+"sunlink": system.SF_NOUNLINK,
+"snapshot": system.SF_SNAPSHOT,
+"uappnd": system.UF_APPEND,
+"uappend": system.UF_APPEND,
+"uarch": system.UF_ARCHIVE,
+"uarchive": system.UF_ARCHIVE,
+"hidden": system.UF_HIDDEN,
+"uhidden": system.UF_HIDDEN,
+"uchg": system.UF_IMMUTABLE,
+"uchange": system.UF_IMMUTABLE,
+"uimmutable": system.UF_IMMUTABLE,
+"uunlnk": system.UF_NOUNLINK,
+"uunlink": system.UF_NOUNLINK,
+"offline": system.UF_OFFLINE,
+"uoffline": system.UF_OFFLINE,
+"opaque": system.UF_OPAQUE,
+"rdonly": system.UF_READONLY,
+"urdonly": system.UF_READONLY,
+"readonly": system.UF_READONLY,
+"ureadonly": system.UF_READONLY,
+"reparse": system.UF_REPARSE,
+"ureparse": system.UF_REPARSE,
+"sparse": system.UF_SPARSE,
+"usparse": system.UF_SPARSE,
+"system": system.UF_SYSTEM,
+"usystem": system.UF_SYSTEM,
+}
+// Only include the short names for the reverse map
+flagValueToName = map[uint32]string{
+system.SF_APPEND: "sappnd",
+system.SF_ARCHIVED: "arch",
+system.SF_IMMUTABLE: "schg",
+system.SF_NOUNLINK: "sunlnk",
+system.SF_SNAPSHOT: "snapshot",
+system.UF_APPEND: "uappnd",
+system.UF_ARCHIVE: "uarch",
+system.UF_HIDDEN: "hidden",
+system.UF_IMMUTABLE: "uchg",
+system.UF_NOUNLINK: "uunlnk",
+system.UF_OFFLINE: "offline",
+system.UF_OPAQUE: "opaque",
+system.UF_READONLY: "rdonly",
+system.UF_REPARSE: "reparse",
+system.UF_SPARSE: "sparse",
+system.UF_SYSTEM: "system",
+}
+)
+
+func parseFileFlags(fflags string) (uint32, uint32, error) {
+var set, clear uint32 = 0, 0
+for _, fflag := range strings.Split(fflags, ",") {
+isClear := false
+if strings.HasPrefix(fflag, "no") {
+isClear = true
+fflag = strings.TrimPrefix(fflag, "no")
+}
+if value, ok := flagNameToValue[fflag]; ok {
+if isClear {
+clear |= value
+} else {
+set |= value
+}
+} else {
+return 0, 0, fmt.Errorf("parsing file flags, unrecognised token: %s", fflag)
+}
+}
+return set, clear, nil
+}
+
+func formatFileFlags(fflags uint32) (string, error) {
+var res = []string{}
+for fflags != 0 {
+// Extract lowest set bit
+fflag := uint32(1) << bits.TrailingZeros32(fflags)
+if name, ok := flagValueToName[fflag]; ok {
+res = append(res, name)
+} else {
+return "", fmt.Errorf("formatting file flags, unrecognised flag: %x", fflag)
+}
+fflags &= ^fflag
+}
+return strings.Join(res, ","), nil
+}
+
+func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error {
+st, err := system.Lstat(path)
+if err != nil {
+return err
+}
+fflags, err := formatFileFlags(st.Flags())
+if err != nil {
+return err
+}
+if fflags != "" {
+if hdr.PAXRecords == nil {
+hdr.PAXRecords = map[string]string{}
+}
+hdr.PAXRecords[paxSCHILYFflags] = fflags
+}
+return nil
+}
+
+func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error {
+if fflags, ok := hdr.PAXRecords[paxSCHILYFflags]; ok {
+var set, clear uint32
+set, clear, err := parseFileFlags(fflags)
+if err != nil {
+return err
+}
+
+// Apply the delta to the existing file flags
+st, err := system.Lstat(path)
+if err != nil {
+return err
+}
+return system.Lchflags(path, (st.Flags() & ^clear)|set)
+}
+return nil
+}
+
+func resetImmutable(path string, fi *os.FileInfo) error {
+var flags uint32
+if fi != nil {
+flags = (*fi).Sys().(*syscall.Stat_t).Flags
+} else {
+st, err := system.Lstat(path)
+if err != nil {
+return err
+}
+flags = st.Flags()
+}
+if flags&(system.SF_IMMUTABLE|system.UF_IMMUTABLE) != 0 {
+flags &= ^(system.SF_IMMUTABLE | system.UF_IMMUTABLE)
+return system.Lchflags(path, flags)
+}
+return nil
+}
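The new fflags_bsd.go round-trips BSD file flags through the SCHILY.fflags PAX record as a comma-separated string, with a "no" prefix marking flags to clear. A quick illustration of the encoding the parser above accepts, using a reduced stand-alone version (the flag values shown are illustrative; the real code uses pkg/system constants):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parse is a reduced parseFileFlags: "nouchg,sappnd" means
    // clear UF_IMMUTABLE and set SF_APPEND.
    func parse(fflags string, names map[string]uint32) (set, clear uint32, err error) {
    	for _, tok := range strings.Split(fflags, ",") {
    		isClear := strings.HasPrefix(tok, "no")
    		name := strings.TrimPrefix(tok, "no")
    		v, ok := names[name]
    		if !ok {
    			return 0, 0, fmt.Errorf("unrecognised token: %s", tok)
    		}
    		if isClear {
    			clear |= v
    		} else {
    			set |= v
    		}
    	}
    	return set, clear, nil
    }

    func main() {
    	names := map[string]uint32{"uchg": 0x2, "sappnd": 0x40000} // illustrative values
    	set, clear, _ := parse("nouchg,sappnd", names)
    	fmt.Printf("set=%#x clear=%#x\n", set, clear) // set=0x40000 clear=0x2
    }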
vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go (generated, vendored; new file; 21 changes)
@@ -0,0 +1,21 @@
+//go:build !freebsd
+// +build !freebsd
+
+package archive
+
+import (
+"archive/tar"
+"os"
+)
+
+func ReadFileFlagsToTarHeader(path string, hdr *tar.Header) error {
+return nil
+}
+
+func WriteFileFlagsFromTarHeader(path string, hdr *tar.Header) error {
+return nil
+}
+
+func resetImmutable(path string, fi *os.FileInfo) error {
+return nil
+}
vendor/github.com/containers/storage/pkg/archive/time_unsupported.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package archive
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go (generated, vendored; 2 changes)
@@ -82,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 }
 }
 
-r := io.NopCloser(tarArchive)
+r := tarArchive
 if decompress {
 decompressedArchive, err := archive.DecompressStream(tarArchive)
 if err != nil {
vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go (generated, vendored; 6 changes)
@@ -6,11 +6,7 @@ import (
 "github.com/containers/storage/pkg/archive"
 )
 
-func chroot(path string) error {
-return nil
-}
-
-func invokeUnpack(decompressedArchive io.ReadCloser,
+func invokeUnpack(decompressedArchive io.Reader,
 dest string,
 options *archive.TarOptions, root string) error {
 return archive.Unpack(decompressedArchive, dest, options)
vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go (generated, vendored; 2 changes)
@@ -12,7 +12,7 @@ func chroot(path string) error {
 return nil
 }
 
-func invokeUnpack(decompressedArchive io.ReadCloser,
+func invokeUnpack(decompressedArchive io.Reader,
 dest string,
 options *archive.TarOptions, root string) error {
 // Windows is different to Linux here because Windows does not support
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !windows && !linux && !darwin
 // +build !windows,!linux,!darwin
 
 package chrootarchive
vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go (generated, vendored; 4 changes)
@@ -27,13 +27,13 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
 
 tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
 if err != nil {
-return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
+return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s: %w", dest, err)
 }
 
 s, err := archive.UnpackLayer(dest, layer, options)
 os.RemoveAll(tmpDir)
 if err != nil {
-return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
+return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err)
 }
 
 return s, nil
vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go (generated, vendored; 3 changes)
@@ -1,3 +1,6 @@
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
 package chrootarchive
 
 import jsoniter "github.com/json-iterator/go"
vendor/github.com/containers/storage/pkg/chunked/compression.go (generated, vendored; 252 changes)
@@ -1,21 +1,10 @@
 package chunked
 
 import (
-archivetar "archive/tar"
-"bytes"
-"encoding/binary"
-"errors"
-"fmt"
 "io"
-"strconv"
 
-"github.com/containerd/stargz-snapshotter/estargz"
 "github.com/containers/storage/pkg/chunked/compressor"
 "github.com/containers/storage/pkg/chunked/internal"
-"github.com/klauspost/compress/zstd"
-"github.com/klauspost/pgzip"
-digest "github.com/opencontainers/go-digest"
-"github.com/vbatts/tar-split/archive/tar"
 )
 
 const (
@@ -29,247 +18,6 @@ const (
 TypeSymlink = internal.TypeSymlink
 )
 
-var typesToTar = map[string]byte{
-TypeReg: tar.TypeReg,
-TypeLink: tar.TypeLink,
-TypeChar: tar.TypeChar,
-TypeBlock: tar.TypeBlock,
-TypeDir: tar.TypeDir,
-TypeFifo: tar.TypeFifo,
-TypeSymlink: tar.TypeSymlink,
-}
-
-func typeToTarType(t string) (byte, error) {
-r, found := typesToTar[t]
-if !found {
-return 0, fmt.Errorf("unknown type: %v", t)
-}
-return r, nil
-}
-
-func isZstdChunkedFrameMagic(data []byte) bool {
-if len(data) < 8 {
-return false
-}
-return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
-}
-
-func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
-// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
-footerSize := int64(51)
-if blobSize <= footerSize {
-return nil, 0, errors.New("blob too small")
-}
-chunk := ImageSourceChunk{
-Offset: uint64(blobSize - footerSize),
-Length: uint64(footerSize),
-}
-parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
-if err != nil {
-return nil, 0, err
-}
-var reader io.ReadCloser
-select {
-case r := <-parts:
-reader = r
-case err := <-errs:
-return nil, 0, err
-}
-defer reader.Close()
-footer := make([]byte, footerSize)
-if _, err := io.ReadFull(reader, footer); err != nil {
-return nil, 0, err
-}
-
-/* Read the ToC offset:
-- 10 bytes gzip header
-- 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
-- 2 bytes Extra: SI1 = 'S', SI2 = 'G'
-- 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
-- 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
-- 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0
-- 8 bytes gzip footer
-*/
-tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
-if err != nil {
-return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
-}
-
-size := int64(blobSize - footerSize - tocOffset)
-// set a reasonable limit
-if size > (1<<20)*50 {
-return nil, 0, errors.New("manifest too big")
-}
-
-chunk = ImageSourceChunk{
-Offset: uint64(tocOffset),
-Length: uint64(size),
-}
-parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
-if err != nil {
-return nil, 0, err
-}
-
-var tocReader io.ReadCloser
-select {
-case r := <-parts:
-tocReader = r
-case err := <-errs:
-return nil, 0, err
-}
-defer tocReader.Close()
-
-r, err := pgzip.NewReader(tocReader)
-if err != nil {
-return nil, 0, err
-}
-defer r.Close()
-
-aTar := archivetar.NewReader(r)
-
-header, err := aTar.Next()
-if err != nil {
-return nil, 0, err
-}
-// set a reasonable limit
-if header.Size > (1<<20)*50 {
-return nil, 0, errors.New("manifest too big")
-}
-
-manifestUncompressed := make([]byte, header.Size)
-if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
-return nil, 0, err
-}
-
-manifestDigester := digest.Canonical.Digester()
-manifestChecksum := manifestDigester.Hash()
-if _, err := manifestChecksum.Write(manifestUncompressed); err != nil {
-return nil, 0, err
-}
-
-d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
-if err != nil {
-return nil, 0, err
-}
-if manifestDigester.Digest() != d {
-return nil, 0, errors.New("invalid manifest checksum")
-}
-
-return manifestUncompressed, tocOffset, nil
-}
-
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
|
|
||||||
// be specified.
|
|
||||||
// This function uses the io.containers.zstd-chunked. annotations when specified.
|
|
||||||
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
|
|
||||||
footerSize := int64(internal.FooterSizeSupported)
|
|
||||||
if blobSize <= footerSize {
|
|
||||||
return nil, 0, errors.New("blob too small")
|
|
||||||
}
|
|
||||||
|
|
||||||
manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
|
|
||||||
if manifestChecksumAnnotation == "" {
|
|
||||||
return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
var offset, length, lengthUncompressed, manifestType uint64
|
|
||||||
|
|
||||||
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
|
|
||||||
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
chunk := ImageSourceChunk{
|
|
||||||
Offset: uint64(blobSize - footerSize),
|
|
||||||
Length: uint64(footerSize),
|
|
||||||
}
|
|
||||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
var reader io.ReadCloser
|
|
||||||
select {
|
|
||||||
case r := <-parts:
|
|
||||||
reader = r
|
|
||||||
case err := <-errs:
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
footer := make([]byte, footerSize)
|
|
||||||
if _, err := io.ReadFull(reader, footer); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
offset = binary.LittleEndian.Uint64(footer[0:8])
|
|
||||||
length = binary.LittleEndian.Uint64(footer[8:16])
|
|
||||||
lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
|
|
||||||
manifestType = binary.LittleEndian.Uint64(footer[24:32])
|
|
||||||
if !isZstdChunkedFrameMagic(footer[32:40]) {
|
|
||||||
return nil, 0, errors.New("invalid magic number")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if manifestType != internal.ManifestTypeCRFS {
|
|
||||||
return nil, 0, errors.New("invalid manifest type")
|
|
||||||
}
|
|
||||||
|
|
||||||
// set a reasonable limit
|
|
||||||
if length > (1<<20)*50 {
|
|
||||||
return nil, 0, errors.New("manifest too big")
|
|
||||||
}
|
|
||||||
if lengthUncompressed > (1<<20)*50 {
|
|
||||||
return nil, 0, errors.New("manifest too big")
|
|
||||||
}
|
|
||||||
|
|
||||||
chunk := ImageSourceChunk{
|
|
||||||
Offset: offset,
|
|
||||||
Length: length,
|
|
||||||
}
|
|
||||||
|
|
||||||
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
var reader io.ReadCloser
|
|
||||||
select {
|
|
||||||
case r := <-parts:
|
|
||||||
reader = r
|
|
||||||
case err := <-errs:
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
manifest := make([]byte, length)
|
|
||||||
if _, err := io.ReadFull(reader, manifest); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
manifestDigester := digest.Canonical.Digester()
|
|
||||||
manifestChecksum := manifestDigester.Hash()
|
|
||||||
if _, err := manifestChecksum.Write(manifest); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := digest.Parse(manifestChecksumAnnotation)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
if manifestDigester.Digest() != d {
|
|
||||||
return nil, 0, errors.New("invalid manifest checksum")
|
|
||||||
}
|
|
||||||
|
|
||||||
decoder, err := zstd.NewReader(nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
defer decoder.Close()
|
|
||||||
|
|
||||||
b := make([]byte, 0, lengthUncompressed)
|
|
||||||
if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
|
|
||||||
return decoded, int64(offset), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return manifest, int64(offset), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
|
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
|
||||||
// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
|
// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
|
||||||
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
|
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
|
||||||
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored; 259 changes, new file)
@@ -0,0 +1,259 @@
+package chunked
+
+import (
+	archivetar "archive/tar"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/containerd/stargz-snapshotter/estargz"
+	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/klauspost/compress/zstd"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+var typesToTar = map[string]byte{
+	TypeReg:     tar.TypeReg,
+	TypeLink:    tar.TypeLink,
+	TypeChar:    tar.TypeChar,
+	TypeBlock:   tar.TypeBlock,
+	TypeDir:     tar.TypeDir,
+	TypeFifo:    tar.TypeFifo,
+	TypeSymlink: tar.TypeSymlink,
+}
+
+func typeToTarType(t string) (byte, error) {
+	r, found := typesToTar[t]
+	if !found {
+		return 0, fmt.Errorf("unknown type: %v", t)
+	}
+	return r, nil
+}
+
+func isZstdChunkedFrameMagic(data []byte) bool {
+	if len(data) < 8 {
+		return false
+	}
+	return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
+}
+
+func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
+	// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
+	footerSize := int64(51)
+	if blobSize <= footerSize {
+		return nil, 0, errors.New("blob too small")
+	}
+	chunk := ImageSourceChunk{
+		Offset: uint64(blobSize - footerSize),
+		Length: uint64(footerSize),
+	}
+	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+	if err != nil {
+		return nil, 0, err
+	}
+	var reader io.ReadCloser
+	select {
+	case r := <-parts:
+		reader = r
+	case err := <-errs:
+		return nil, 0, err
+	}
+	defer reader.Close()
+	footer := make([]byte, footerSize)
+	if _, err := io.ReadFull(reader, footer); err != nil {
+		return nil, 0, err
+	}
+
+	/* Read the ToC offset:
+	   - 10 bytes  gzip header
+	   - 2  bytes  XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+	   - 2  bytes  Extra: SI1 = 'S', SI2 = 'G'
+	   - 2  bytes  Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+	   - 22 bytes  Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+	   - 5  bytes  flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0
+	   - 8  bytes  gzip footer
+	*/
+	tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
+	if err != nil {
+		return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
+	}
+
+	size := int64(blobSize - footerSize - tocOffset)
+	// set a reasonable limit
+	if size > (1<<20)*50 {
+		return nil, 0, errors.New("manifest too big")
+	}
+
+	chunk = ImageSourceChunk{
+		Offset: uint64(tocOffset),
+		Length: uint64(size),
+	}
+	parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+	if err != nil {
+		return nil, 0, err
+	}
+
+	var tocReader io.ReadCloser
+	select {
+	case r := <-parts:
+		tocReader = r
+	case err := <-errs:
+		return nil, 0, err
+	}
+	defer tocReader.Close()
+
+	r, err := pgzip.NewReader(tocReader)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer r.Close()
+
+	aTar := archivetar.NewReader(r)
+
+	header, err := aTar.Next()
+	if err != nil {
+		return nil, 0, err
+	}
+	// set a reasonable limit
+	if header.Size > (1<<20)*50 {
+		return nil, 0, errors.New("manifest too big")
+	}
+
+	manifestUncompressed := make([]byte, header.Size)
+	if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
+		return nil, 0, err
+	}
+
+	manifestDigester := digest.Canonical.Digester()
+	manifestChecksum := manifestDigester.Hash()
+	if _, err := manifestChecksum.Write(manifestUncompressed); err != nil {
+		return nil, 0, err
+	}
+
+	d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
+	if err != nil {
+		return nil, 0, err
+	}
+	if manifestDigester.Digest() != d {
+		return nil, 0, errors.New("invalid manifest checksum")
+	}
+
+	return manifestUncompressed, tocOffset, nil
+}
+
+// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
+// be specified.
+// This function uses the io.github.containers.zstd-chunked. annotations when specified.
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
+	footerSize := int64(internal.FooterSizeSupported)
+	if blobSize <= footerSize {
+		return nil, 0, errors.New("blob too small")
+	}
+
+	manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
+	if manifestChecksumAnnotation == "" {
+		return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
+	}
+
+	var offset, length, lengthUncompressed, manifestType uint64
+
+	if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
+		if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
+			return nil, 0, err
+		}
+	} else {
+		chunk := ImageSourceChunk{
+			Offset: uint64(blobSize - footerSize),
+			Length: uint64(footerSize),
+		}
+		parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+		if err != nil {
+			return nil, 0, err
+		}
+		var reader io.ReadCloser
+		select {
+		case r := <-parts:
+			reader = r
+		case err := <-errs:
+			return nil, 0, err
+		}
+		footer := make([]byte, footerSize)
+		if _, err := io.ReadFull(reader, footer); err != nil {
+			return nil, 0, err
+		}
+
+		offset = binary.LittleEndian.Uint64(footer[0:8])
+		length = binary.LittleEndian.Uint64(footer[8:16])
+		lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
+		manifestType = binary.LittleEndian.Uint64(footer[24:32])
+		if !isZstdChunkedFrameMagic(footer[32:40]) {
+			return nil, 0, errors.New("invalid magic number")
+		}
+	}
+
+	if manifestType != internal.ManifestTypeCRFS {
+		return nil, 0, errors.New("invalid manifest type")
+	}
+
+	// set a reasonable limit
+	if length > (1<<20)*50 {
+		return nil, 0, errors.New("manifest too big")
+	}
+	if lengthUncompressed > (1<<20)*50 {
+		return nil, 0, errors.New("manifest too big")
+	}
+
+	chunk := ImageSourceChunk{
+		Offset: offset,
+		Length: length,
+	}
+
+	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+	if err != nil {
+		return nil, 0, err
+	}
+	var reader io.ReadCloser
+	select {
+	case r := <-parts:
+		reader = r
+	case err := <-errs:
+		return nil, 0, err
+	}
+
+	manifest := make([]byte, length)
+	if _, err := io.ReadFull(reader, manifest); err != nil {
+		return nil, 0, err
+	}
+
+	manifestDigester := digest.Canonical.Digester()
+	manifestChecksum := manifestDigester.Hash()
+	if _, err := manifestChecksum.Write(manifest); err != nil {
+		return nil, 0, err
+	}
+
+	d, err := digest.Parse(manifestChecksumAnnotation)
+	if err != nil {
+		return nil, 0, err
+	}
+	if manifestDigester.Digest() != d {
+		return nil, 0, errors.New("invalid manifest checksum")
+	}
+
+	decoder, err := zstd.NewReader(nil)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer decoder.Close()
+
+	b := make([]byte, 0, lengthUncompressed)
+	if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
+		return decoded, int64(offset), nil
+	}
+
+	return manifest, int64(offset), nil
+}
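The comment block in readEstargzChunkedManifest describes the fixed 51-byte estargz footer; the ToC offset is the run of 16 hex digits embedded in the gzip Extra field, which is why the code parses footer[16:32] (written as 16:16+22-6). A standalone sketch of just that parsing step, using a synthetic footer rather than a real blob:

package main

import (
	"fmt"
	"strconv"
)

// estargzTOCOffset extracts the TOC offset from the fixed 51-byte estargz
// footer described above: bytes 16..31 hold fmt.Sprintf("%016x", offset),
// immediately followed by the literal "STARGZ" marker.
func estargzTOCOffset(footer []byte) (int64, error) {
	if len(footer) != 51 {
		return 0, fmt.Errorf("unexpected footer size %d", len(footer))
	}
	// 16 + 22 - 6 = 32: skip the 16-byte gzip header+extra prefix, take
	// the 16 hex digits, stop before the 6-byte "STARGZ" suffix.
	return strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
}

func main() {
	footer := make([]byte, 51)
	copy(footer[16:], fmt.Sprintf("%016xSTARGZ", 0x1234))
	off, err := estargzTOCOffset(footer)
	fmt.Println(off, err) // 4660 <nil>
}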
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go (generated, vendored; 12 changes)
@@ -33,11 +33,11 @@ const (
 	holesFinderStateEOF
 )
 
-// ReadByte reads a single byte from the underlying reader.
+// readByte reads a single byte from the underlying reader.
 // If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
 // If there are at least f.THRESHOLD consecutive zeros, then the
 // return value is (N_CONSECUTIVE_ZEROS, '\x00').
-func (f *holesFinder) ReadByte() (int64, byte, error) {
+func (f *holesFinder) readByte() (int64, byte, error) {
 	for {
 		switch f.state {
 		// reading the file stream
@@ -78,7 +78,7 @@ func (f *holesFinder) ReadByte() (int64, byte, error) {
 				f.state = holesFinderStateFound
 			}
 		} else {
-			if f.reader.UnreadByte(); err != nil {
+			if err := f.reader.UnreadByte(); err != nil {
 				return 0, 0, err
 			}
 			f.state = holesFinderStateRead
@@ -95,7 +95,7 @@ func (f *holesFinder) ReadByte() (int64, byte, error) {
 			return holeLen, 0, nil
 		}
 		if b != 0 {
-			if f.reader.UnreadByte(); err != nil {
+			if err := f.reader.UnreadByte(); err != nil {
 				return 0, 0, err
 			}
 			f.state = holesFinderStateRead
@@ -159,7 +159,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
 	}
 
 	for i := 0; i < len(b); i++ {
-		holeLen, n, err := rc.reader.ReadByte()
+		holeLen, n, err := rc.reader.readByte()
 		if err != nil {
 			if err == io.EOF {
 				rc.closed = true
@@ -429,7 +429,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
 
 	go func() {
 		ch <- writeZstdChunkedStream(out, metadata, r, level)
-		io.Copy(io.Discard, r)
+		_, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates.
 		r.Close()
 		close(ch)
 	}()
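The two UnreadByte changes above fix a real bug, not just style: `if f.reader.UnreadByte(); err != nil` calls UnreadByte purely for its side effect and then tests a stale outer err, silently discarding the call's own error. A small self-contained demonstration of the difference:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	r := bufio.NewReader(strings.NewReader("a"))
	var err error // stale outer err, always nil here

	b, _ := r.ReadByte()
	_ = b

	// Buggy form (as before the fix): UnreadByte's result is thrown away
	// and the stale outer err is tested, so a failure would go unnoticed.
	if r.UnreadByte(); err != nil {
		fmt.Println("never reached")
	}

	// Fixed form: the if-statement declares and tests the actual return
	// value. This second UnreadByte fails (nothing was read in between).
	if err := r.UnreadByte(); err != nil {
		fmt.Println("UnreadByte failed:", err)
	}
}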
vendor/github.com/containers/storage/pkg/chunked/internal/compression.go (generated, vendored; 8 changes)
@@ -88,8 +88,8 @@ func GetType(t byte) (string, error) {
 }
 
 const (
-	ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
-	ManifestInfoKey     = "io.containers.zstd-chunked.manifest-position"
+	ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
+	ManifestInfoKey     = "io.github.containers.zstd-chunked.manifest-position"
 
 	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
 	ManifestTypeCRFS = 1
@@ -114,7 +114,7 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
 		return err
 	}
 
-	var size []byte = make([]byte, 4)
+	size := make([]byte, 4)
 	binary.LittleEndian.PutUint32(size, uint32(len(data)))
 	if _, err := dest.Write(size); err != nil {
 		return err
@@ -168,7 +168,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 	}
 
 	// Store the offset to the manifest and its size in LE order
-	var manifestDataLE []byte = make([]byte, FooterSizeSupported)
+	manifestDataLE := make([]byte, FooterSizeSupported)
 	binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
 	binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
 	binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
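WriteZstdChunkedManifest lays the footer out as little-endian uint64 fields, and readZstdChunkedManifest in compression_linux.go reads the same 40 bytes back: manifest offset, compressed length, uncompressed length, manifest type, then the 8-byte frame magic. A sketch of that layout; footerSize and frameMagic are illustrative stand-ins for internal.FooterSizeSupported and internal.ZstdChunkedFrameMagic, whose exact values are not shown in this diff:

package main

import (
	"encoding/binary"
	"fmt"
)

const footerSize = 40 // stand-in for internal.FooterSizeSupported

// Placeholder bytes; the real magic is internal.ZstdChunkedFrameMagic.
var frameMagic = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

// encodeFooter builds the 40-byte footer exactly as the reader consumes it:
// four little-endian uint64 fields followed by the 8-byte frame magic.
func encodeFooter(offset, length, lengthUncompressed, manifestType uint64) []byte {
	f := make([]byte, footerSize)
	binary.LittleEndian.PutUint64(f[0:8], offset)
	binary.LittleEndian.PutUint64(f[8:16], length)
	binary.LittleEndian.PutUint64(f[16:24], lengthUncompressed)
	binary.LittleEndian.PutUint64(f[24:32], manifestType)
	copy(f[32:40], frameMagic)
	return f
}

func main() {
	f := encodeFooter(1024, 512, 2048, 1)
	fmt.Println(
		binary.LittleEndian.Uint64(f[0:8]),   // 1024: manifest offset
		binary.LittleEndian.Uint64(f[8:16]),  // 512: compressed length
		binary.LittleEndian.Uint64(f[16:24]), // 2048: uncompressed length
		binary.LittleEndian.Uint64(f[24:32]), // 1: ManifestTypeCRFS
	)
}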
vendor/github.com/containers/storage/pkg/chunked/storage.go (generated, vendored; 2 changes)
@@ -17,7 +17,7 @@ type ImageSourceSeekable interface {
 }
 
 // ErrBadRequest is returned when the request is not valid
-type ErrBadRequest struct {
+type ErrBadRequest struct { //nolint: errname
 }
 
 func (e ErrBadRequest) Error() string {
vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo
 // +build linux,cgo
 
 package devicemapper

vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo
 // +build linux,cgo
 
 package devicemapper

vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo && !libdm_no_deferred_remove
 // +build linux,cgo,!libdm_no_deferred_remove
 
 package devicemapper

vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo && !static_build
 // +build linux,cgo,!static_build
 
 package devicemapper

vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo && libdm_no_deferred_remove
 // +build linux,cgo,libdm_no_deferred_remove
 
 package devicemapper

vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build linux && cgo
 // +build linux,cgo
 
 package devicemapper

vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package dmesg

vendor/github.com/containers/storage/pkg/fileutils/fileutils.go (generated, vendored; 6 changes)
@@ -321,14 +321,14 @@ func ReadSymlinkedDirectory(path string) (string, error) {
 	var realPath string
 	var err error
 	if realPath, err = filepath.Abs(path); err != nil {
-		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+		return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err)
 	}
 	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
-		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+		return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err)
 	}
 	realPathInfo, err := os.Stat(realPath)
 	if err != nil {
-		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+		return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err)
 	}
 	if !realPathInfo.Mode().IsDir() {
 		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
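The fileutils.go hunk switches the error verb from %s to %w, which keeps the wrapped error in the chain instead of flattening it to text, so callers can still match it with errors.Is or errors.As. A small demonstration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Stat("/no/such/path")

	// With %w the original error survives wrapping; with %s callers would
	// only get its rendered text and errors.Is would return false.
	wrapped := fmt.Errorf("failed to stat target '%s': %w", "/no/such/path", err)

	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
}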
vendor/github.com/containers/storage/pkg/homedir/homedir_others.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux && !darwin && !freebsd
 // +build !linux,!darwin,!freebsd
 
 package homedir

vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go (generated, vendored; 2 changes)
@@ -63,7 +63,7 @@ func StickRuntimeDirContents(files []string) ([]string, error) {
 	runtimeDir, err := GetRuntimeDir()
 	if err != nil {
 		// ignore error if runtimeDir is empty
-		return nil, nil
+		return nil, nil //nolint: nilerr
 	}
 	runtimeDir, err = filepath.Abs(runtimeDir)
 	if err != nil {

vendor/github.com/containers/storage/pkg/idtools/idtools.go (generated, vendored; 4 changes)
@@ -2,6 +2,7 @@ package idtools
 
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"os"
 	"os/user"
@@ -359,7 +360,8 @@ func parseSubidFile(path, username string) (ranges, error) {
 }
 
 func checkChownErr(err error, name string, uid, gid int) error {
-	if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
+	var e *os.PathError
+	if errors.As(err, &e) && e.Err == syscall.EINVAL {
 		return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err)
 	}
 	return err
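checkChownErr now uses errors.As instead of a direct type assertion: the assertion err.(*os.PathError) fails as soon as the *os.PathError has been wrapped by another error, while errors.As walks the whole chain. A self-contained example of the difference:

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	// A wrapped *os.PathError, as chown failures often arrive.
	err := fmt.Errorf("chown failed: %w",
		&os.PathError{Op: "chown", Path: "/tmp/x", Err: syscall.EINVAL})

	// The old form would miss this: the dynamic type of err is the
	// fmt wrapper, so err.(*os.PathError) reports ok == false.
	if _, ok := err.(*os.PathError); !ok {
		fmt.Println("type assertion fails on wrapped errors")
	}

	// errors.As unwraps until it finds a *os.PathError in the chain.
	var e *os.PathError
	if errors.As(err, &e) && e.Err == syscall.EINVAL {
		fmt.Println("EINVAL from", e.Op) // EINVAL from chown
	}
}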
vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux || !libsubid || !cgo
 // +build !linux !libsubid !cgo
 
 package idtools

vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package idtools

vendor/github.com/containers/storage/pkg/ioutils/fswriters.go (generated, vendored; 13 changes)
@@ -15,7 +15,7 @@ type AtomicFileWriterOptions struct {
 	NoSync bool
 }
 
-var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{}
+var defaultWriterOptions = AtomicFileWriterOptions{}
 
 // SetDefaultOptions overrides the default options used when creating an
 // atomic file writer.
@@ -27,6 +27,13 @@ func SetDefaultOptions(opts AtomicFileWriterOptions) {
 // temporary file and closing it atomically changes the temporary file to
 // destination path. Writing and closing concurrently is not allowed.
 func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) {
+	return newAtomicFileWriter(filename, perm, opts)
+}
+
+// newAtomicFileWriter returns WriteCloser so that writing to it writes to a
+// temporary file and closing it atomically changes the temporary file to
+// destination path. Writing and closing concurrently is not allowed.
+func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) {
 	f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
 	if err != nil {
 		return nil, err
@@ -55,14 +62,14 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err
 
 // AtomicWriteFile atomically writes data to a file named by filename.
 func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
-	f, err := NewAtomicFileWriter(filename, perm)
+	f, err := newAtomicFileWriter(filename, perm, nil)
 	if err != nil {
 		return err
 	}
 	n, err := f.Write(data)
 	if err == nil && n < len(data) {
 		err = io.ErrShortWrite
-		f.(*atomicFileWriter).writeErr = err
+		f.writeErr = err
 	}
 	if err1 := f.Close(); err == nil {
 		err = err1
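For context, AtomicWriteFile (and the atomicFileWriter behind it) implements the usual write-to-temp-then-rename pattern: because the temp file lives in the same directory as the target, the final rename is atomic and readers never observe a half-written file. A simplified, self-contained sketch of that pattern, not the vendored implementation itself (which also honours AtomicFileWriterOptions such as NoSync):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWriteFile writes data to a temp file next to filename, syncs it,
// then renames it over filename in one step.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	tmp := f.Name()
	cleanup := func(e error) error { f.Close(); os.Remove(tmp); return e }

	if _, err := f.Write(data); err != nil {
		return cleanup(err)
	}
	if err := f.Sync(); err != nil { // the step NoSync would skip
		return cleanup(err)
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return err
	}
	if err := os.Chmod(tmp, perm); err != nil {
		os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, filename) // atomic within one filesystem
}

func main() {
	fmt.Println(atomicWriteFile("/tmp/demo.txt", []byte("hello\n"), 0o644))
}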
vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go (generated, vendored; 1 change)
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package ioutils

vendor/github.com/containers/storage/pkg/lockfile/lockfile.go (generated, vendored; 13 changes)
@@ -17,10 +17,6 @@ type Locker interface {
 	// - tried to lock a read-only lock-file
 	Lock()
 
-	// Acquire a writer lock recursively, allowing for recursive acquisitions
-	// within the same process space.
-	RecursiveLock()
-
 	// Unlock the lock.
 	// The default unix implementation panics if:
 	// - unlocking an unlocked lock
@@ -44,8 +40,13 @@ type Locker interface {
 	// IsReadWrite() checks if the lock file is read-write
 	IsReadWrite() bool
 
-	// Locked() checks if lock is locked for writing by a thread in this process
-	Locked() bool
+	// AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking.
+	// It might do nothing at all, or it may panic if the caller is not the owner of this lock.
+	AssertLocked()
+
+	// AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking.
+	// It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing.
+	AssertLockedForWriting()
 }
 
 var (
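The interface change replaces the queryable Locked() bool with assertions, because a boolean answer is inherently racy: by the time the caller inspects it, another goroutine may have taken or released the lock. A hypothetical usage sketch; demoLock and writeLayerData are illustrative, not part of the package:

package main

import "fmt"

// Locker is a trimmed stand-in for the interface above, just enough to
// show how the new assertions are meant to be used.
type Locker interface {
	Lock()
	Unlock()
	AssertLockedForWriting()
}

type demoLock struct{ locked bool }

func (l *demoLock) Lock()   { l.locked = true }
func (l *demoLock) Unlock() { l.locked = false }
func (l *demoLock) AssertLockedForWriting() {
	if !l.locked {
		panic("internal error: lock is not held for writing")
	}
}

// writeLayerData documents its locking precondition with an assertion
// rather than branching on a racy Locked() bool, which is exactly why
// Locked() was removed in favour of the Assert* methods.
func writeLayerData(l Locker) {
	l.AssertLockedForWriting()
	fmt.Println("safe to mutate shared state")
}

func main() {
	l := &demoLock{}
	l.Lock()
	defer l.Unlock()
	writeLayerData(l)
}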
vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go (generated, vendored; 76 changes)
@@ -30,7 +30,6 @@ type lockfile struct {
 	locktype int16
 	locked   bool
 	ro       bool
-	recursive bool
 }
 
 const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
@@ -79,7 +78,7 @@ func openLock(path string, ro bool) (fd int, err error) {
 	}
 	fd, err = unix.Open(path, flags, 0o644)
 	if err == nil {
-		return
+		return fd, nil
 	}
 
 	// the directory of the lockfile seems to be removed, try to create it
@@ -131,10 +130,10 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
 
 // lock locks the lockfile via FCTNL(2) based on the specified type and
 // command.
-func (l *lockfile) lock(lType int16, recursive bool) {
+func (l *lockfile) lock(lType int16) {
 	lk := unix.Flock_t{
 		Type:   lType,
-		Whence: int16(os.SEEK_SET),
+		Whence: int16(unix.SEEK_SET),
 		Start:  0,
 		Len:    0,
 	}
@@ -142,13 +141,7 @@ func (l *lockfile) lock(lType int16) {
 	case unix.F_RDLCK:
 		l.rwMutex.RLock()
 	case unix.F_WRLCK:
-		if recursive {
-			// NOTE: that's okay as recursive is only set in RecursiveLock(), so
-			// there's no need to protect against hypothetical RDLCK cases.
-			l.rwMutex.RLock()
-		} else {
-			l.rwMutex.Lock()
-		}
+		l.rwMutex.Lock()
 	default:
 		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
 	}
@@ -171,7 +164,6 @@ func (l *lockfile) lock(lType int16) {
 	}
 	l.locktype = lType
 	l.locked = true
-	l.recursive = recursive
 	l.counter++
 }
 
@@ -180,30 +172,19 @@ func (l *lockfile) Lock() {
 	if l.ro {
 		panic("can't take write lock on read-only lock file")
 	} else {
-		l.lock(unix.F_WRLCK, false)
-	}
-}
-
-// RecursiveLock locks the lockfile as a writer but allows for recursive
-// acquisitions within the same process space. Note that RLock() will be called
-// if it's a lockTypReader lock.
-func (l *lockfile) RecursiveLock() {
-	if l.ro {
-		l.RLock()
-	} else {
-		l.lock(unix.F_WRLCK, true)
+		l.lock(unix.F_WRLCK)
 	}
 }
 
 // LockRead locks the lockfile as a reader.
 func (l *lockfile) RLock() {
-	l.lock(unix.F_RDLCK, false)
+	l.lock(unix.F_RDLCK)
 }
 
 // Unlock unlocks the lockfile.
 func (l *lockfile) Unlock() {
 	l.stateMutex.Lock()
-	if l.locked == false {
+	if !l.locked {
 		// Panic when unlocking an unlocked lock. That's a violation
 		// of the lock semantics and will reveal such.
 		panic("calling Unlock on unlocked lock")
@@ -224,7 +205,7 @@ func (l *lockfile) Unlock() {
 		// file lock.
 		unix.Close(int(l.fd))
 	}
-	if l.locktype == unix.F_RDLCK || l.recursive {
+	if l.locktype == unix.F_RDLCK {
 		l.rwMutex.RUnlock()
 	} else {
 		l.rwMutex.Unlock()
@@ -232,11 +213,33 @@ func (l *lockfile) Unlock() {
 	l.stateMutex.Unlock()
 }
 
-// Locked checks if lockfile is locked for writing by a thread in this process.
-func (l *lockfile) Locked() bool {
-	l.stateMutex.Lock()
-	defer l.stateMutex.Unlock()
-	return l.locked && (l.locktype == unix.F_WRLCK)
+func (l *lockfile) AssertLocked() {
+	// DO NOT provide a variant that returns the value of l.locked.
+	//
+	// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
+	// we can’t tell the difference.
+	//
+	// Hence, this “AssertLocked” method, which exists only for sanity checks.
+
+	// Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
+	// with no possible writers.
+	// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
+	// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
+	if !l.locked {
+		panic("internal error: lock is not held by the expected owner")
+	}
+}
+
+func (l *lockfile) AssertLockedForWriting() {
+	// DO NOT provide a variant that returns the current lock state.
+	//
+	// The same caveats as for AssertLocked apply equally.
+
+	l.AssertLocked()
+	// Like AssertLocked, don’t even bother with l.stateMutex.
+	if l.locktype != unix.F_WRLCK {
+		panic("internal error: lock is not held for writing")
+	}
 }
 
 // Touch updates the lock file with the UID of the user.
@@ -265,14 +268,15 @@ func (l *lockfile) Modified() (bool, error) {
 		panic("attempted to check last-writer in lockfile without locking it first")
 	}
 	defer l.stateMutex.Unlock()
-	currentLW := make([]byte, len(l.lw))
+	currentLW := make([]byte, lastWriterIDSize)
 	n, err := unix.Pread(int(l.fd), currentLW, 0)
 	if err != nil {
 		return true, err
 	}
-	if n != len(l.lw) {
-		return true, nil
-	}
+	// It is important to handle the partial read case, because
+	// the initial size of the lock file is zero, which is a valid
+	// state (no writes yet)
+	currentLW = currentLW[:n]
 	oldLW := l.lw
 	l.lw = currentLW
 	return !bytes.Equal(currentLW, oldLW), nil
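lock() above builds a unix.Flock_t and, per the function comment, takes the file lock via fcntl(2). A minimal standalone sketch of that system-call sequence, assuming Linux and golang.org/x/sys/unix; the lock path is an arbitrary example:

//go:build linux
// +build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/demo.lock", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// A whole-file write lock: Start=0, Len=0 means "from the start to
	// EOF". F_WRLCK corresponds to Lock(); F_RDLCK would be RLock().
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: int16(unix.SEEK_SET),
		Start:  0,
		Len:    0,
	}
	// F_SETLKW blocks until the lock is granted.
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk); err != nil {
		panic(err)
	}
	fmt.Println("write lock acquired")

	lk.Type = unix.F_UNLCK
	_ = unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk)
}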
vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go (generated, vendored; 26 changes)
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package lockfile
@@ -36,12 +37,6 @@ func (l *lockfile) Lock() {
 	l.locked = true
 }
 
-func (l *lockfile) RecursiveLock() {
-	// We don't support Windows but a recursive writer-lock in one process-space
-	// is really a writer lock, so just panic.
-	panic("not supported")
-}
-
 func (l *lockfile) RLock() {
 	l.mu.Lock()
 	l.locked = true
@@ -52,8 +47,23 @@ func (l *lockfile) Unlock() {
 	l.mu.Unlock()
 }
 
-func (l *lockfile) Locked() bool {
-	return l.locked
+func (l *lockfile) AssertLocked() {
+	// DO NOT provide a variant that returns the value of l.locked.
+	//
+	// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
+	// we can’t tell the difference.
+	//
+	// Hence, this “AssertLocked” method, which exists only for sanity checks.
+	if !l.locked {
+		panic("internal error: lock is not held by the expected owner")
+	}
+}
+
+func (l *lockfile) AssertLockedForWriting() {
+	// DO NOT provide a variant that returns the current lock state.
+	//
+	// The same caveats as for AssertLocked apply equally.
+	l.AssertLocked() // The current implementation does not distinguish between read and write locks.
 }
 
 func (l *lockfile) Modified() (bool, error) {
|
1
vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
generated
vendored
1
vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
generated
vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux && cgo
|
||||||
// +build linux,cgo
|
// +build linux,cgo
|
||||||
|
|
||||||
package loopback
|
package loopback
|
||||||
|
1
vendor/github.com/containers/storage/pkg/loopback/ioctl.go
generated
vendored
1
vendor/github.com/containers/storage/pkg/loopback/ioctl.go
generated
vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux && cgo
|
||||||
// +build linux,cgo
|
// +build linux,cgo
|
||||||
|
|
||||||
package loopback
|
package loopback
|
||||||
|
1
vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
generated
vendored
1
vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
generated
vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux && cgo
|
||||||
// +build linux,cgo
|
// +build linux,cgo
|
||||||
|
|
||||||
package loopback
|
package loopback
|
||||||
|
1
vendor/github.com/containers/storage/pkg/loopback/loopback.go
generated
vendored
1
vendor/github.com/containers/storage/pkg/loopback/loopback.go
generated
vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build linux && cgo
|
||||||
// +build linux,cgo
|
// +build linux,cgo
|
||||||
|
|
||||||
package loopback
|
package loopback
|
||||||
|
27
vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
generated
vendored
27
vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
generated
vendored
@ -1,16 +1,29 @@
|
|||||||
|
//go:build !windows
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
package mount
|
package mount
|
||||||
|
|
||||||
import "golang.org/x/sys/unix"
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
func unmount(target string, flags int) error {
|
func unmount(target string, flags int) error {
|
||||||
err := unix.Unmount(target, flags)
|
var err error
|
||||||
if err == nil || err == unix.EINVAL {
|
for i := 0; i < 50; i++ {
|
||||||
// Ignore "not mounted" error here. Note the same error
|
err = unix.Unmount(target, flags)
|
||||||
// can be returned if flags are invalid, so this code
|
switch err {
|
||||||
// assumes that the flags value is always correct.
|
case unix.EBUSY:
|
||||||
return nil
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
continue
|
||||||
|
case unix.EINVAL, nil:
|
||||||
|
// Ignore "not mounted" error here. Note the same error
|
||||||
|
// can be returned if flags are invalid, so this code
|
||||||
|
// assumes that the flags value is always correct.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
return &mountError{
|
return &mountError{
|
||||||
|
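The new unmount retries EBUSY up to 50 times at 50ms intervals before giving up, since a mount can stay transiently busy while references to it are being released. The same bounded-retry shape, extracted into a generic helper for illustration (retryBusy is hypothetical, not part of the package):

package main

import (
	"fmt"
	"time"
)

// retryBusy mirrors the unmount loop above in generic form: retry a
// transiently failing operation a bounded number of times, treating a
// caller-supplied "busy" condition as retryable and anything else as final.
func retryBusy(op func() error, isBusy func(error) bool) error {
	var err error
	for i := 0; i < 50; i++ {
		err = op()
		if err == nil || !isBusy(err) {
			return err
		}
		time.Sleep(50 * time.Millisecond)
	}
	return err // still busy after all attempts
}

func main() {
	attempts := 0
	err := retryBusy(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("busy")
		}
		return nil
	}, func(err error) bool { return err != nil && err.Error() == "busy" })
	fmt.Println(attempts, err) // 3 <nil>
}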
Some files were not shown because too many files have changed in this diff.